Mirror of https://github.com/optim-enterprises-bv/kubernetes.git
Add Godeps for the Prometheus monitoring client library and its dependencies.
See issue #1625 for discussion.
43 Godeps/Godeps.json (generated)
@@ -29,6 +29,11 @@
 		{
 			"Comment": "release-96",
 			"Rev": "98c78185197025f935947caac56a7b6d022f89d2"
 		},
+		{
+			"ImportPath": "code.google.com/p/goprotobuf/proto",
+			"Comment": "go.r60-163",
+			"Rev": "9352842ae63ee1d7e74e074ce7bb10370c4b6b9e"
+		},
 		{
 			"ImportPath": "github.com/Sirupsen/logrus",
 			"Comment": "v0.6.2-10-g51fe59a",
@@ -127,6 +132,10 @@
 			"Comment": "0.1.3-8-g6633656",
 			"Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc"
 		},
+		{
+			"ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext",
+			"Rev": "7a864a042e844af638df17ebbabf8183dace556a"
+		},
 		{
 			"ImportPath": "github.com/miekg/dns",
 			"Rev": "3f504e8dabd5d562e997d19ce0200aa41973e1b2"
@@ -153,6 +162,40 @@
 			"Comment": "v1.0-28-g8adf9e1730c5",
 			"Rev": "8adf9e1730c55cdc590de7d49766cb2acc88d8f2"
 		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/_vendor/goautoneg",
+			"Comment": "0.1.0-9-g52186fc",
+			"Rev": "52186fc518809dc9a56502348751e353866b2059"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/_vendor/perks/quantile",
+			"Comment": "0.1.0-9-g52186fc",
+			"Rev": "52186fc518809dc9a56502348751e353866b2059"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/model",
+			"Comment": "0.1.0-9-g52186fc",
+			"Rev": "52186fc518809dc9a56502348751e353866b2059"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/prometheus",
+			"Comment": "0.1.0-9-g52186fc",
+			"Rev": "52186fc518809dc9a56502348751e353866b2059"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/text",
+			"Comment": "0.1.0-9-g52186fc",
+			"Rev": "52186fc518809dc9a56502348751e353866b2059"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_model/go",
+			"Comment": "model-0.0.2-10-gbc9454c",
+			"Rev": "bc9454ca562dc050e060ea61a1c0e562a189850f"
+		},
+		{
+			"ImportPath": "github.com/prometheus/procfs",
+			"Rev": "92faa308558161acab0ada1db048e9996ecec160"
+		},
 		{
 			"ImportPath": "github.com/racker/perigee",
 			"Comment": "v0.0.0-18-g0c00cb0",
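Each entry above is just an ImportPath, an optional Comment (a git describe-style tag), and a pinned Rev. For orientation, here is a minimal sketch of reading such a file with the standard library; the top-level ImportPath/GoVersion/Deps wrapper is the conventional godep layout and is assumed here rather than visible in the hunks:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Dep mirrors one entry of the "Deps" array shown in the hunks above.
type Dep struct {
	ImportPath string
	Comment    string
	Rev        string
}

// Godeps mirrors the assumed top-level layout of Godeps/Godeps.json.
type Godeps struct {
	ImportPath string
	GoVersion  string
	Deps       []Dep
}

func main() {
	f, err := os.Open("Godeps/Godeps.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var g Godeps
	if err := json.NewDecoder(f).Decode(&g); err != nil {
		panic(err)
	}
	for _, d := range g.Deps {
		fmt.Printf("%s pinned at %s\n", d.ImportPath, d.Rev)
	}
}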
40 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile (generated, vendored, new file)
@@ -0,0 +1,40 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# http://code.google.com/p/goprotobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

install:
	go install

test: install generate-test-pbs
	go test


generate-test-pbs:
	make install && cd testdata && make
1979 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go (generated, vendored, new file); file diff suppressed because it is too large.
174 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go (generated, vendored, new file)
@@ -0,0 +1,174 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// http://code.google.com/p/goprotobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Protocol buffer deep copy.
// TODO: MessageSet and RawMessage.

package proto

import (
	"log"
	"reflect"
	"strings"
)

// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
	in := reflect.ValueOf(pb)
	if in.IsNil() {
		return pb
	}

	out := reflect.New(in.Type().Elem())
	// out is empty so a merge is a deep copy.
	mergeStruct(out.Elem(), in.Elem())
	return out.Interface().(Message)
}

// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
	in := reflect.ValueOf(src)
	out := reflect.ValueOf(dst)
	if out.IsNil() {
		panic("proto: nil destination")
	}
	if in.Type() != out.Type() {
		// Explicit test prior to mergeStruct so that mistyped nils will fail
		panic("proto: type mismatch")
	}
	if in.IsNil() {
		// Merging nil into non-nil is a quiet no-op
		return
	}
	mergeStruct(out.Elem(), in.Elem())
}

func mergeStruct(out, in reflect.Value) {
	for i := 0; i < in.NumField(); i++ {
		f := in.Type().Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		mergeAny(out.Field(i), in.Field(i))
	}

	if emIn, ok := in.Addr().Interface().(extendableProto); ok {
		emOut := out.Addr().Interface().(extendableProto)
		mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
	}

	uf := in.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return
	}
	uin := uf.Bytes()
	if len(uin) > 0 {
		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
	}
}

func mergeAny(out, in reflect.Value) {
	if in.Type() == protoMessageType {
		if !in.IsNil() {
			if out.IsNil() {
				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
			} else {
				Merge(out.Interface().(Message), in.Interface().(Message))
			}
		}
		return
	}
	switch in.Kind() {
	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
		reflect.String, reflect.Uint32, reflect.Uint64:
		out.Set(in)
	case reflect.Ptr:
		if in.IsNil() {
			return
		}
		if out.IsNil() {
			out.Set(reflect.New(in.Elem().Type()))
		}
		mergeAny(out.Elem(), in.Elem())
	case reflect.Slice:
		if in.IsNil() {
			return
		}
		if in.Type().Elem().Kind() == reflect.Uint8 {
			// []byte is a scalar bytes field, not a repeated field.
			// Make a deep copy.
			// Append to []byte{} instead of []byte(nil) so that we never end up
			// with a nil result.
			out.SetBytes(append([]byte{}, in.Bytes()...))
			return
		}
		n := in.Len()
		if out.IsNil() {
			out.Set(reflect.MakeSlice(in.Type(), 0, n))
		}
		switch in.Type().Elem().Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
			reflect.String, reflect.Uint32, reflect.Uint64:
			out.Set(reflect.AppendSlice(out, in))
		default:
			for i := 0; i < n; i++ {
				x := reflect.Indirect(reflect.New(in.Type().Elem()))
				mergeAny(x, in.Index(i))
				out.Set(reflect.Append(out, x))
			}
		}
	case reflect.Struct:
		mergeStruct(out, in)
	default:
		// unknown type, so not a protocol buffer
		log.Printf("proto: don't know how to copy %v", in)
	}
}

func mergeExtension(out, in map[int32]Extension) {
	for extNum, eIn := range in {
		eOut := Extension{desc: eIn.desc}
		if eIn.value != nil {
			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
			mergeAny(v, reflect.ValueOf(eIn.value))
			eOut.value = v.Interface()
		}
		if eIn.enc != nil {
			eOut.enc = make([]byte, len(eIn.enc))
			copy(eOut.enc, eIn.enc)
		}

		out[extNum] = eOut
	}
}
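clone_test.go below exercises these functions against the package's generated testdata messages. As a compact restatement of the rules in the Clone and Merge doc comments (cloning is a deep copy, set scalars overwrite, repeated fields append), here is a hedged sketch; pb.MyMessage and its Count/Pet fields come from the testdata package used by the tests, not from this file:

package proto_test

import (
	"fmt"

	"code.google.com/p/goprotobuf/proto"

	pb "./testdata"
)

func demoCloneAndMerge() {
	src := &pb.MyMessage{Count: proto.Int32(42), Pet: []string{"horsey"}}

	// Clone returns an independent deep copy; mutating it leaves src alone.
	dup := proto.Clone(src).(*pb.MyMessage)
	*dup.Count = 7

	// Merge overwrites set scalar fields in dst and appends repeated elements.
	dst := &pb.MyMessage{Count: proto.Int32(1), Pet: []string{"bunny"}}
	proto.Merge(dst, src)

	fmt.Println(*src.Count, *dst.Count, dst.Pet) // 42 42 [bunny horsey]
}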
202 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go (generated, vendored, new file)
@@ -0,0 +1,202 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
|
||||
pb "./testdata"
|
||||
)
|
||||
|
||||
var cloneTestMessage = &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
}
|
||||
|
||||
func init() {
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("extension"),
|
||||
}
|
||||
if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
|
||||
panic("SetExtension: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
|
||||
if !proto.Equal(m, cloneTestMessage) {
|
||||
t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
|
||||
}
|
||||
|
||||
// Verify it was a deep copy.
|
||||
*m.Inner.Port++
|
||||
if proto.Equal(m, cloneTestMessage) {
|
||||
t.Error("Mutating clone changed the original")
|
||||
}
|
||||
// Byte fields and repeated fields should be copied.
|
||||
if &m.Pet[0] == &cloneTestMessage.Pet[0] {
|
||||
t.Error("Pet: repeated field not copied")
|
||||
}
|
||||
if &m.Others[0] == &cloneTestMessage.Others[0] {
|
||||
t.Error("Others: repeated field not copied")
|
||||
}
|
||||
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
|
||||
t.Error("Others[0].Value: bytes field not copied")
|
||||
}
|
||||
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
|
||||
t.Error("RepBytes: repeated field not copied")
|
||||
}
|
||||
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
|
||||
t.Error("RepBytes[0]: bytes field not copied")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloneNil(t *testing.T) {
|
||||
var m *pb.MyMessage
|
||||
if c := proto.Clone(m); !proto.Equal(m, c) {
|
||||
t.Errorf("Clone(%v) = %v", m, c)
|
||||
}
|
||||
}
|
||||
|
||||
var mergeTests = []struct {
|
||||
src, dst, want proto.Message
|
||||
}{
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Pet: []string{"horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
},
|
||||
{
|
||||
// Explicitly test a src=nil field
|
||||
Inner: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
},
|
||||
{},
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
RepBytes: [][]byte{[]byte("wow")},
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham")},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
},
|
||||
},
|
||||
// Check that a scalar bytes field replaces rather than appends.
|
||||
{
|
||||
src: &pb.OtherMessage{Value: []byte("foo")},
|
||||
dst: &pb.OtherMessage{Value: []byte("bar")},
|
||||
want: &pb.OtherMessage{Value: []byte("foo")},
|
||||
},
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
for _, m := range mergeTests {
|
||||
got := proto.Clone(m.dst)
|
||||
proto.Merge(got, m.src)
|
||||
if !proto.Equal(got, m.want) {
|
||||
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
721 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go (generated, vendored, new file)
@@ -0,0 +1,721 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// The fundamental decoders that interpret bytes on the wire.
|
||||
// Those that take integer types all return uint64 and are
|
||||
// therefore of type valueDecoder.
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
// x, n already 0
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
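For reference, the value 300 travels on the wire as the two bytes 0xAC 0x02: 300 is 0b100101100, the low seven bits (0x2C) are sent first with the continuation bit set (giving 0xAC), then the remaining 0b10 (0x02). A minimal sketch calling the exported helper above (not part of the vendored file):

package main

import (
	"fmt"

	"code.google.com/p/goprotobuf/proto"
)

func main() {
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2; n == 0 would have meant truncated input
}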
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
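The bit trick above is the inverse of zigzag encoding, which interleaves signed values so that small magnitudes stay small on the wire: 0, -1, 1, -2, 2 are carried as the varints 0, 1, 2, 3, 4. The same computation, restated as a standalone helper purely for illustration (not part of decode.go):

package main

import "fmt"

// zigzagDecode64 restates the bit trick used above: varints 0, 1, 2, 3
// decode to the sint64 values 0, -1, 1, -2.
func zigzagDecode64(n uint64) int64 {
	return int64(n>>1) ^ -int64(n&1)
}

func main() {
	fmt.Println(zigzagDecode64(0), zigzagDecode64(1), zigzagDecode64(2), zigzagDecode64(3)) // 0 -1 1 -2
}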
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// These are not ValueDecoders: they produce an array of bytes or a string.
|
||||
// bytes, embedded messages
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
||||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
||||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
||||
oi := o.index
|
||||
|
||||
err := o.skip(t, tag, wire)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !unrecField.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
ptr := structPointer_Bytes(base, unrecField)
|
||||
|
||||
// Add the skipped field to struct field
|
||||
obuf := o.buf
|
||||
|
||||
o.buf = *ptr
|
||||
o.EncodeVarint(uint64(tag<<3 | wire))
|
||||
*ptr = append(o.buf, obuf[oi:o.index]...)
|
||||
|
||||
o.buf = obuf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
||||
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
switch wire {
|
||||
case WireVarint:
|
||||
_, err = o.DecodeVarint()
|
||||
case WireFixed64:
|
||||
_, err = o.DecodeFixed64()
|
||||
case WireBytes:
|
||||
_, err = o.DecodeRawBytes(false)
|
||||
case WireFixed32:
|
||||
_, err = o.DecodeFixed32()
|
||||
case WireStartGroup:
|
||||
for {
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
fwire := int(u & 0x7)
|
||||
if fwire == WireEndGroup {
|
||||
break
|
||||
}
|
||||
ftag := int(u >> 3)
|
||||
err = o.skip(t, ftag, fwire)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The method should reset the receiver before
|
||||
// decoding starts. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
return UnmarshalMerge(buf, pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
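The practical difference between the two entry points above: Unmarshal wipes the destination first, while UnmarshalMerge keeps what is already there and appends repeated elements. A hedged sketch using the testdata message from the tests elsewhere in this commit (pb.MyMessage and its Count/Pet fields are assumptions carried over from those tests):

package proto_test

import (
	"code.google.com/p/goprotobuf/proto"

	pb "./testdata"
)

func demoUnmarshalMerge() error {
	wire, err := proto.Marshal(&pb.MyMessage{Count: proto.Int32(1), Pet: []string{"kitty"}})
	if err != nil {
		return err
	}

	m := &pb.MyMessage{Count: proto.Int32(1), Pet: []string{"bunny"}}
	if err := proto.UnmarshalMerge(wire, m); err != nil {
		return err
	}
	// m.Pet is now ["bunny", "kitty"]: existing data was kept, repeated elements appended.

	if err := proto.Unmarshal(wire, m); err != nil {
		return err
	}
	// m.Pet is now ["kitty"]: Unmarshal reset m before decoding.
	return nil
}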
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
||||
|
||||
if collectStats {
|
||||
stats.Decode++
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// unmarshalType does the work of unmarshaling a structure.
|
||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
||||
var state errorState
|
||||
required, reqFields := prop.reqCount, uint64(0)
|
||||
|
||||
var err error
|
||||
for err == nil && o.index < len(o.buf) {
|
||||
oi := o.index
|
||||
var u uint64
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
wire := int(u & 0x7)
|
||||
if wire == WireEndGroup {
|
||||
if is_group {
|
||||
return nil // input is satisfied
|
||||
}
|
||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||
}
|
||||
tag := int(u >> 3)
|
||||
if tag <= 0 {
|
||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
||||
}
|
||||
fieldnum, ok := prop.decoderTags.get(tag)
|
||||
if !ok {
|
||||
// Maybe it's an extension?
|
||||
if prop.extendable {
|
||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
ext := e.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
e.ExtensionMap()[int32(tag)] = ext
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
||||
continue
|
||||
}
|
||||
p := prop.Prop[fieldnum]
|
||||
|
||||
if p.dec == nil {
|
||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
||||
continue
|
||||
}
|
||||
dec := p.dec
|
||||
if wire != WireStartGroup && wire != p.WireType {
|
||||
if wire == WireBytes && p.packedDec != nil {
|
||||
// a packable field
|
||||
dec = p.packedDec
|
||||
} else {
|
||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
||||
continue
|
||||
}
|
||||
}
|
||||
decErr := dec(o, p, base)
|
||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
||||
err = decErr
|
||||
}
|
||||
if err == nil && p.Required {
|
||||
// Successfully decoded a required field.
|
||||
if tag <= 64 {
|
||||
// use bitmap for fields 1-64 to catch field reuse.
|
||||
var mask uint64 = 1 << uint64(tag-1)
|
||||
if reqFields&mask == 0 {
|
||||
// new required field
|
||||
reqFields |= mask
|
||||
required--
|
||||
}
|
||||
} else {
|
||||
// This is imprecise. It can be fooled by a required field
|
||||
// with a tag > 64 that is encoded twice; that's very rare.
|
||||
// A fully correct implementation would require allocating
|
||||
// a data structure, which we would like to avoid.
|
||||
required--
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if is_group {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if state.err != nil {
|
||||
return state.err
|
||||
}
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field. If we use extra
|
||||
// CPU, we could determine the field only if the missing required field
|
||||
// has a tag <= 64 and we check reqFields.
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Individual type decoders
|
||||
// For each,
|
||||
// u is the decoded value,
|
||||
// v is a pointer to the field (pointer) in the struct
|
||||
|
||||
// Sizes of the pools to allocate inside the Buffer.
|
||||
// The goal is modest amortization and allocation
|
||||
// on at least 16-byte boundaries.
|
||||
const (
|
||||
boolPoolSize = 16
|
||||
uint32PoolSize = 8
|
||||
uint64PoolSize = 4
|
||||
)
|
||||
|
||||
// Decode a bool.
|
||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(o.bools) == 0 {
|
||||
o.bools = make([]bool, boolPoolSize)
|
||||
}
|
||||
o.bools[0] = u != 0
|
||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
||||
o.bools = o.bools[1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int32.
|
||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int64.
|
||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a string.
|
||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sp := new(string)
|
||||
*sp = s
|
||||
*structPointer_String(base, p.field) = sp
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bytes ([]byte).
|
||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_Bytes(base, p.field) = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool).
|
||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
*v = append(*v, u != 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded bools
|
||||
|
||||
y := *v
|
||||
for i := 0; i < nb; i++ {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
y = append(y, u != 0)
|
||||
}
|
||||
|
||||
*v = y
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32).
|
||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word32Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int32s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(uint32(u))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64).
|
||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
structPointer_Word64Slice(base, p.field).Append(u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word64Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int64s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(u)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of strings ([]string).
|
||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_StringSlice(base, p.field)
|
||||
*v = append(*v, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of slice of bytes ([][]byte).
|
||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BytesSlice(base, p.field)
|
||||
*v = append(*v, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a group.
|
||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
||||
}
|
||||
|
||||
// Decode an embedded message.
|
||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
||||
raw, e := o.DecodeRawBytes(false)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := structPointer_Interface(bas, p.stype)
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode a slice of embedded messages.
|
||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, false, base)
|
||||
}
|
||||
|
||||
// Decode a slice of embedded groups.
|
||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, true, base)
|
||||
}
|
||||
|
||||
// Decode a slice of structs ([]*struct).
|
||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
||||
v := reflect.New(p.stype)
|
||||
bas := toStructPointer(v)
|
||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
||||
|
||||
if is_group {
|
||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := v.Interface()
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
||||
1054 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go (generated, vendored, new file); file diff suppressed because it is too large.
241 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go (generated, vendored, new file)
@@ -0,0 +1,241 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
// TODO: MessageSet.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN.
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal (a "bytes" field,
|
||||
although represented by []byte, is not a repeated field)
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Every other combination of things are not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
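Two of the subtleties listed in the comment above are worth restating with concrete values, taken from the test table in equal_test.go below: a scalar bytes field distinguishes nil from empty, while a repeated field does not. A small sketch, assuming the same testdata types the tests use:

package proto_test

import (
	"fmt"

	"code.google.com/p/goprotobuf/proto"

	pb "./testdata"
)

func demoEqualEdgeCases() {
	// A scalar bytes field tells nil (unset) apart from empty (set but zero length).
	fmt.Println(proto.Equal(&pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil})) // false

	// A repeated field does not: nil and empty have the same length and elements.
	fmt.Println(proto.Equal(&pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}})) // true
}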
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
b1, ok := f1.Interface().(raw)
|
||||
if ok {
|
||||
b2 := f2.Interface().(raw)
|
||||
// RawMessage
|
||||
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
if !bytes.Equal(u1, u2) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalAny(v1, v2 reflect.Value) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Ptr:
|
||||
return equalAny(v1.Elem(), v2.Elem())
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// em1 and em2 are extension maps.
|
||||
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
166 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go (generated, vendored, new file)
@@ -0,0 +1,166 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "./testdata"
|
||||
. "code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
// Four identical base messages.
|
||||
// The init function adds extensions to some of them.
|
||||
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
|
||||
|
||||
// Two messages with non-message extensions.
|
||||
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
|
||||
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
|
||||
|
||||
func init() {
|
||||
ext1 := &pb.Ext{Data: String("Kirk")}
|
||||
ext2 := &pb.Ext{Data: String("Picard")}
|
||||
|
||||
// messageWithExtension1a has ext1, but never marshals it.
|
||||
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
|
||||
panic("SetExtension on 1a failed: " + err.Error())
|
||||
}
|
||||
|
||||
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
|
||||
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
|
||||
panic("SetExtension on 1b failed: " + err.Error())
|
||||
}
|
||||
buf, err := Marshal(messageWithExtension1b)
|
||||
if err != nil {
|
||||
panic("Marshal of 1b failed: " + err.Error())
|
||||
}
|
||||
messageWithExtension1b.Reset()
|
||||
if err := Unmarshal(buf, messageWithExtension1b); err != nil {
|
||||
panic("Unmarshal of 1b failed: " + err.Error())
|
||||
}
|
||||
|
||||
// messageWithExtension2 has ext2.
|
||||
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
|
||||
panic("SetExtension on 2 failed: " + err.Error())
|
||||
}
|
||||
|
||||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
|
||||
panic("SetExtension on Int32-1 failed: " + err.Error())
|
||||
}
|
||||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
|
||||
panic("SetExtension on Int32-2 failed: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
var EqualTests = []struct {
|
||||
desc string
|
||||
a, b Message
|
||||
exp bool
|
||||
}{
|
||||
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
|
||||
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
|
||||
{"nil vs nil", nil, nil, true},
|
||||
{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
|
||||
{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
|
||||
{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
|
||||
|
||||
{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
|
||||
{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
|
||||
{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
|
||||
{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
|
||||
|
||||
{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
|
||||
{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
|
||||
{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
|
||||
{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
|
||||
{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||
{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
|
||||
{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||
|
||||
{
|
||||
"nested, different",
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"nested, equal",
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||
true,
|
||||
},
|
||||
|
||||
{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
|
||||
{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
|
||||
{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
|
||||
{
|
||||
"repeated bytes",
|
||||
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||
true,
|
||||
},
|
||||
|
||||
{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
|
||||
{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
|
||||
{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
|
||||
|
||||
{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
|
||||
{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
|
||||
|
||||
{
|
||||
"message with group",
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
for _, tc := range EqualTests {
|
||||
if res := Equal(tc.a, tc.b); res != tc.exp {
|
||||
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
353
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go
generated
vendored
Normal file
@@ -0,0 +1,353 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
value interface{}
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||
base.ExtensionMap()[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
// Check the extended type.
|
||||
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
|
||||
func encodeExtensionMap(m map[int32]Extension) error {
|
||||
for k, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
m[k] = e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||
for _, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
n += len(e.enc)
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
n += props.size(props, toStructPointer(x))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
_, ok := pb.ExtensionMap()[extension.Field]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
delete(pb.ExtensionMap(), extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present it returns ErrMissingExtension.
|
||||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
emap := pb.ExtensionMap()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
o := NewBuffer(b)
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
rep := extension.repeated()
|
||||
|
||||
props := extensionProperties(extension)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate a "field" to store the pointer/slice itself; the
|
||||
// pointer/slice will be stored here. We pass
|
||||
// the address of this field to props.dec.
|
||||
// This passes a zero field and a *t and lets props.dec
|
||||
// interpret it as a *struct{ x t }.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
for {
|
||||
// Discard wire type and field number varint. It isn't needed.
|
||||
if _, err := o.DecodeVarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !rep || o.index >= len(o.buf) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, ok := pb.(extendableProto)
|
||||
if !ok {
|
||||
err = errors.New("proto: not an extendable proto")
|
||||
return
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
}
|
||||
|
||||
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
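The vendored tests that follow exercise this API directly; as a quick orientation, here is a brief, hypothetical sketch (not part of the upstream sources) of the SetExtension/GetExtension round trip, assuming the same ./testdata package pb that those tests import:

package proto_test

import (
	"testing"

	pb "./testdata"
	"code.google.com/p/goprotobuf/proto"
)

// Sets an extension, round-trips the message through the wire format, and
// checks that the extension is still retrievable afterwards.
func TestExtensionRoundTripSketch(t *testing.T) {
	msg := &pb.MyMessage{Count: proto.Int32(4)}

	if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{}); err != nil {
		t.Fatalf("SetExtension: %v", err)
	}
	if !proto.HasExtension(msg, pb.E_Ext_More) {
		t.Fatal("extension missing after SetExtension")
	}

	buf, err := proto.Marshal(msg)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	out := &pb.MyMessage{}
	if err := proto.Unmarshal(buf, out); err != nil {
		t.Fatalf("Unmarshal: %v", err)
	}
	if _, err := proto.GetExtension(out, pb.E_Ext_More); err != nil {
		t.Fatalf("GetExtension after round trip: %v", err)
	}

	// ClearExtension drops the field from the extension map again.
	proto.ClearExtension(out, pb.E_Ext_More)
	if proto.HasExtension(out, pb.E_Ext_More) {
		t.Fatal("extension still present after ClearExtension")
	}
}

GetExtension stores the decoded value back into the extension map, so repeated calls return the same value; TestGetExtensionStability below checks exactly that behaviour.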
||||
94
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "./testdata"
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
|
||||
msg := &pb.MyMessage{}
|
||||
ext1 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", ext1)
|
||||
}
|
||||
exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
|
||||
pb.E_Ext_More,
|
||||
pb.E_Ext_Text,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtensions() failed: %s", err)
|
||||
}
|
||||
if exts[0] != ext1 {
|
||||
t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
|
||||
}
|
||||
if exts[1] != nil {
|
||||
t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExtensionStability(t *testing.T) {
|
||||
check := func(m *pb.MyMessage) bool {
|
||||
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtension() failed: %s", err)
|
||||
}
|
||||
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtension() failed: %s", err)
|
||||
}
|
||||
return ext1 == ext2
|
||||
}
|
||||
msg := &pb.MyMessage{Count: proto.Int32(4)}
|
||||
ext0 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", ext0)
|
||||
}
|
||||
if !check(msg) {
|
||||
t.Errorf("GetExtension() not stable before marshaling")
|
||||
}
|
||||
bb, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal() failed: %s", err)
|
||||
}
|
||||
msg1 := &pb.MyMessage{}
|
||||
err = proto.Unmarshal(bb, msg1)
|
||||
if err != nil {
|
||||
t.Fatalf("Unmarshal() failed: %s", err)
|
||||
}
|
||||
if !check(msg1) {
|
||||
t.Errorf("GetExtension() not stable after unmarshaling")
|
||||
}
|
||||
}
|
||||
740
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go
generated
vendored
Normal file
@@ -0,0 +1,740 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
Helpers for getting values are superseded by the
|
||||
GetFoo methods and their use is deprecated.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed with the enum's type name. Enum types have
|
||||
a String method, and a Enum method to assist in message construction.
|
||||
- Nested groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; };
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import "code.google.com/p/goprotobuf/proto"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (this *Test) Reset() { *this = Test{} }
|
||||
func (this *Test) String() string { return proto.CompactTextString(this) }
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (this *Test) GetLabel() string {
|
||||
if this != nil && this.Label != nil {
|
||||
return *this.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (this *Test) GetType() int32 {
|
||||
if this != nil && this.Type != nil {
|
||||
return *this.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (this *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if this != nil {
|
||||
return this.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} }
|
||||
func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) }
|
||||
|
||||
func (this *Test_OptionalGroup) GetRequiredField() string {
|
||||
if this != nil && this.RequiredField != nil {
|
||||
return *this.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
"./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &example.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Optionalgroup: &example.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := new(example.Test)
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // write point
|
||||
|
||||
// pools of basic types to amortize allocation.
|
||||
bools []bool
|
||||
uint32s []uint32
|
||||
uint64s []uint64
|
||||
|
||||
// extra pools, only used with pointer_reflect.go
|
||||
int32s []int32
|
||||
int64s []int64
|
||||
float32s []float32
|
||||
float64s []float64
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
p := new(uint32)
|
||||
*p = v
|
||||
return p
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (o *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := o.buf
|
||||
index := o.index
|
||||
o.buf = b
|
||||
o.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := o.index
|
||||
if index == len(o.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = o.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = o.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
break
|
||||
|
||||
case WireVarint:
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
o.buf = obuf
|
||||
o.index = index
|
||||
}
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
// f is *T or []*T
|
||||
if f.Kind() == reflect.Ptr {
|
||||
setDefaults(f, recur, zeros)
|
||||
} else {
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
func ptrToStruct(t reflect.Type) bool {
|
||||
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
// nested messages
|
||||
if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) {
|
||||
dm.nested = append(dm.nested, fi)
|
||||
continue
|
||||
}
|
||||
|
||||
sf := scalarField{
|
||||
index: fi,
|
||||
kind: ft.Elem().Kind(),
|
||||
}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
dm.scalars = append(dm.scalars, sf)
|
||||
continue
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
continue
|
||||
}
|
||||
|
||||
dm.scalars = append(dm.scalars, sf)
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
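Before the message-set support, a brief, hypothetical usage sketch (not part of the upstream sources) showing how the scalar helpers, Marshal/Unmarshal, Buffer.DebugPrint, and Equal defined in this package fit together, again assuming the ./testdata package pb used by the tests:

package proto_test

import (
	"testing"

	pb "./testdata"
	"code.google.com/p/goprotobuf/proto"
)

// Round-trips a small message built with the scalar helpers and dumps its
// wire encoding with Buffer.DebugPrint.
func TestHelperRoundTripSketch(t *testing.T) {
	msg := &pb.MyMessage{
		Count:    proto.Int32(7),           // optional int32 becomes *int32
		RepBytes: [][]byte{[]byte("sham")}, // repeated bytes becomes [][]byte
	}

	data, err := proto.Marshal(msg)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}

	// DebugPrint writes a tag/wire-type breakdown of data to stdout,
	// which is handy when inspecting hand-rolled encodings.
	proto.NewBuffer(nil).DebugPrint("MyMessage wire data", data)

	out := &pb.MyMessage{}
	if err := proto.Unmarshal(data, out); err != nil {
		t.Fatalf("Unmarshal: %v", err)
	}
	if !proto.Equal(msg, out) {
		t.Errorf("round trip mismatch: got %v, want %v", out, msg)
	}
}

Marshal and Unmarshal allocate a temporary Buffer internally; as the Buffer documentation above notes, reusing an explicit Buffer mainly matters when encoding many messages and allocation pressure is a concern.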
||||
287
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go
generated
vendored
Normal file
@@ -0,0 +1,287 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and MessageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
//
|
||||
// When a proto1 proto has a field that looks like:
|
||||
// optional message<MessageSet> info = 3;
|
||||
// the protocol compiler produces a field in the generated struct that looks like:
|
||||
// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
|
||||
// The package is automatically inserted so there is no need for that proto file to
|
||||
// import this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type MessageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure MessageSet is a Message.
|
||||
var _ Message = (*MessageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Has(pb Message) bool {
|
||||
if ms.find(pb) != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return ErrNoMessageTypeId
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return ErrNoMessageTypeId
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Reset() { *ms = MessageSet{} }
|
||||
func (ms *MessageSet) String() string { return CompactTextString(ms) }
|
||||
func (*MessageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sort extension IDs to provide a deterministic encoding.
|
||||
// See also enc_map in encode.go.
|
||||
ids := make([]int, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, int(id))
|
||||
}
|
||||
sort.Ints(ids)
|
||||
|
||||
ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
|
||||
for _, id := range ids {
|
||||
e := m[int32(id)]
|
||||
// Remove the wire type and field number varint, as well as the length varint.
|
||||
msg := skipVarint(skipVarint(e.enc))
|
||||
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: Int32(int32(id)),
|
||||
Message: msg,
|
||||
})
|
||||
}
|
||||
return Marshal(ms)
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
||||
ms := new(MessageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
if i > 0 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
66
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
|
||||
// Check that a repeated message set entry will be concatenated.
|
||||
in := &MessageSet{
|
||||
Item: []*_MessageSet_Item{
|
||||
{TypeId: Int32(12345), Message: []byte("hoo")},
|
||||
{TypeId: Int32(12345), Message: []byte("hah")},
|
||||
},
|
||||
}
|
||||
b, err := Marshal(in)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
t.Logf("Marshaled bytes: %q", b)
|
||||
|
||||
m := make(map[int32]Extension)
|
||||
if err := UnmarshalMessageSet(b, m); err != nil {
|
||||
t.Fatalf("UnmarshalMessageSet: %v", err)
|
||||
}
|
||||
ext, ok := m[12345]
|
||||
if !ok {
|
||||
t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
|
||||
}
|
||||
// Skip wire type/field number and length varints.
|
||||
got := skipVarint(skipVarint(ext.enc))
|
||||
if want := []byte("hoohah"); !bytes.Equal(got, want) {
|
||||
t.Errorf("Combined extension is %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
384
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go
generated
vendored
Normal file
@@ -0,0 +1,384 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine,!appenginevm
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
// The reflect value must itself be a pointer to a struct.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer{v}
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer as an interface value.
|
||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
||||
return p.v.Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// field returns the given field in the struct as a reflect value.
|
||||
func structPointer_field(p structPointer, f field) reflect.Value {
|
||||
// Special case: an extension map entry with a value of type T
|
||||
// passes a *T to the struct-handling code with a zero field,
|
||||
// expecting that it will be treated as equivalent to *struct{ X T },
|
||||
// which has the same memory layout. We have to handle that case
|
||||
// specially, because reflect will panic if we call FieldByIndex on a
|
||||
// non-struct.
|
||||
if f == nil {
|
||||
return p.v.Elem()
|
||||
}
|
||||
|
||||
return p.v.Elem().FieldByIndex(f)
|
||||
}
|
||||
|
||||
// ifield returns the given field in the struct as an interface value.
|
||||
func structPointer_ifield(p structPointer, f field) interface{} {
|
||||
return structPointer_field(p, f).Addr().Interface()
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return structPointer_ifield(p, f).(*[]byte)
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return structPointer_ifield(p, f).(*[][]byte)
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return structPointer_ifield(p, f).(**bool)
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return structPointer_ifield(p, f).(*[]bool)
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return structPointer_ifield(p, f).(**string)
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return structPointer_ifield(p, f).(*[]string)
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
structPointer_field(p, f).Set(q.v)
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return structPointer{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// StructPointerSlice the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
|
||||
return structPointerSlice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A structPointerSlice represents the address of a slice of pointers to structs
|
||||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
|
||||
type structPointerSlice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p structPointerSlice) Len() int { return p.v.Len() }
|
||||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
|
||||
func (p structPointerSlice) Append(q structPointer) {
|
||||
p.v.Set(reflect.Append(p.v, q.v))
|
||||
}
|
||||
|
||||
var (
|
||||
int32Type = reflect.TypeOf(int32(0))
|
||||
uint32Type = reflect.TypeOf(uint32(0))
|
||||
float32Type = reflect.TypeOf(float32(0))
|
||||
int64Type = reflect.TypeOf(int64(0))
|
||||
uint64Type = reflect.TypeOf(uint64(0))
|
||||
float64Type = reflect.TypeOf(float64(0))
|
||||
)
|
||||
|
||||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
|
||||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
|
||||
type word32 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Set sets p to point at a newly allocated word with bits set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int32Type:
|
||||
if len(o.int32s) == 0 {
|
||||
o.int32s = make([]int32, uint32PoolSize)
|
||||
}
|
||||
o.int32s[0] = int32(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int32s[0]))
|
||||
o.int32s = o.int32s[1:]
|
||||
return
|
||||
case uint32Type:
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
|
||||
o.uint32s = o.uint32s[1:]
|
||||
return
|
||||
case float32Type:
|
||||
if len(o.float32s) == 0 {
|
||||
o.float32s = make([]float32, uint32PoolSize)
|
||||
}
|
||||
o.float32s[0] = math.Float32frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float32s[0]))
|
||||
o.float32s = o.float32s[1:]
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.Set(reflect.New(t))
|
||||
p.v.Elem().SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32_Get(p word32) uint32 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
|
||||
type word32Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word32Slice) Append(x uint32) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
elem.SetInt(int64(int32(x)))
|
||||
case reflect.Uint32:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float32:
|
||||
elem.SetFloat(float64(math.Float32frombits(x)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p word32Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word32Slice) Index(i int) uint32 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
|
||||
return word32Slice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int64Type:
|
||||
if len(o.int64s) == 0 {
|
||||
o.int64s = make([]int64, uint64PoolSize)
|
||||
}
|
||||
o.int64s[0] = int64(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int64s[0]))
|
||||
o.int64s = o.int64s[1:]
|
||||
return
|
||||
case uint64Type:
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
|
||||
o.uint64s = o.uint64s[1:]
|
||||
return
|
||||
case float64Type:
|
||||
if len(o.float64s) == 0 {
|
||||
o.float64s = make([]float64, uint64PoolSize)
|
||||
}
|
||||
o.float64s[0] = math.Float64frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float64s[0]))
|
||||
o.float64s = o.float64s[1:]
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
type word64Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word64Slice) Append(x uint64) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
elem.SetInt(int64(int64(x)))
|
||||
case reflect.Uint64:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float64:
|
||||
elem.SetFloat(float64(math.Float64frombits(x)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p word64Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word64Slice) Index(i int) uint64 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return uint64(elem.Uint())
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(float64(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
||||
return word64Slice{structPointer_field(p, f)}
|
||||
}
|
||||
218
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,218 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// http://code.google.com/p/goprotobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build !appengine appenginevm

// This file contains the implementation of the proto field accesses using package unsafe.

package proto

import (
	"reflect"
	"unsafe"
)

// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
//	type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.

// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer

// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer(unsafe.Pointer(v.Pointer()))
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p == nil
}

// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != ^field(0)
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer

func (v *structPointerSlice) Len() int                  { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }

// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32

// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
	return *p == nil
}

// Set sets *v to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}

// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
	return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32

func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
func (v *word32Slice) Len() int           { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }

// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// word64 is like word32 but for 64-bit values.
type word64 **uint64

func word64_Set(p word64, o *Buffer, x uint64) {
	if len(o.uint64s) == 0 {
		o.uint64s = make([]uint64, uint64PoolSize)
	}
	o.uint64s[0] = x
	*p = &o.uint64s[0]
	o.uint64s = o.uint64s[1:]
}

func word64_IsNil(p word64) bool {
	return *p == nil
}

func word64_Get(p word64) uint64 {
	return **p
}

func structPointer_Word64(p structPointer, f field) word64 {
	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64

func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
func (v *word64Slice) Len() int           { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }

func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
662
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go
generated
vendored
Normal file
@@ -0,0 +1,662 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
const startSize = 10 // initial slice/string sizes
|
||||
|
||||
// Encoders are defined in encode.go
|
||||
// An encoder outputs the full representation of a field, including its
|
||||
// tag and encoder type.
|
||||
type encoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueEncoder encodes a single integer in a particular encoding.
|
||||
type valueEncoder func(o *Buffer, x uint64) error
|
||||
|
||||
// Sizers are defined in encode.go
|
||||
// A sizer returns the encoded size of a field, including its tag and encoder
|
||||
// type.
|
||||
type sizer func(prop *Properties, base structPointer) int
|
||||
|
||||
// A valueSizer returns the encoded size of a single integer in a particular
|
||||
// encoding.
|
||||
type valueSizer func(x uint64) int
|
||||
|
||||
// Decoders are defined in decode.go
|
||||
// A decoder creates a value from its wire representation.
|
||||
// Unrecognized subelements are saved in unrec.
|
||||
type decoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueDecoder decodes a single integer in a particular encoding.
|
||||
type valueDecoder func(o *Buffer) (x uint64, err error)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
unrecField field // field id of the XXX_unrecognized []byte field
|
||||
extendable bool // is this an extendable proto
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
def_uint64 uint64
|
||||
|
||||
enc encoder
|
||||
valEnc valueEncoder // set for bool and numeric types only
|
||||
field field
|
||||
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
|
||||
tagbuf [8]byte
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
isMarshaler bool
|
||||
isUnmarshaler bool
|
||||
|
||||
size sizer
|
||||
valSize valueSizer // set for bool and numeric types only
|
||||
|
||||
dec decoder
|
||||
valDec valueDecoder // set for bool and numeric types only
|
||||
|
||||
// If this is a packable field, this will be the decoder for the packed version of the field.
|
||||
packedDec decoder
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s = ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
if p.OrigName != p.Name {
|
||||
s += ",name=" + p.OrigName
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeVarint
|
||||
p.valDec = (*Buffer).DecodeVarint
|
||||
p.valSize = sizeVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
p.valEnc = (*Buffer).EncodeFixed32
|
||||
p.valDec = (*Buffer).DecodeFixed32
|
||||
p.valSize = sizeFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
p.valEnc = (*Buffer).EncodeFixed64
|
||||
p.valDec = (*Buffer).DecodeFixed64
|
||||
p.valSize = sizeFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag32
|
||||
p.valDec = (*Buffer).DecodeZigzag32
|
||||
p.valSize = sizeZigzag32
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag64
|
||||
p.valDec = (*Buffer).DecodeZigzag64
|
||||
p.valSize = sizeZigzag64
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func logNoSliceEnc(t1, t2 reflect.Type) {
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
|
||||
}
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// Initialize the fields for encoding and decoding.
|
||||
func (p *Properties) setEncAndDec(typ reflect.Type, lockGetProp bool) {
|
||||
p.enc = nil
|
||||
p.dec = nil
|
||||
p.size = nil
|
||||
|
||||
switch t1 := typ; t1.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
||||
|
||||
case reflect.Ptr:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no encoder function for %T -> %T\n", t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_bool
|
||||
p.dec = (*Buffer).dec_bool
|
||||
p.size = size_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_int32
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_uint32
|
||||
p.dec = (*Buffer).dec_int32 // can reuse
|
||||
p.size = size_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_int64
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_string
|
||||
p.dec = (*Buffer).dec_string
|
||||
p.size = size_string
|
||||
case reflect.Struct:
|
||||
p.stype = t1.Elem()
|
||||
p.isMarshaler = isMarshaler(t1)
|
||||
p.isUnmarshaler = isUnmarshaler(t1)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_struct_message
|
||||
p.dec = (*Buffer).dec_struct_message
|
||||
p.size = size_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_struct_group
|
||||
p.dec = (*Buffer).dec_struct_group
|
||||
p.size = size_struct_group
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_bool
|
||||
p.size = size_slice_packed_bool
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_bool
|
||||
p.size = size_slice_bool
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_bool
|
||||
p.packedDec = (*Buffer).dec_slice_packed_bool
|
||||
case reflect.Int32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int32
|
||||
p.size = size_slice_packed_int32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int32
|
||||
p.size = size_slice_int32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Uint32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_byte
|
||||
p.size = size_slice_byte
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch t2.Bits() {
|
||||
case 32:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case 64:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
}
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_slice_string
|
||||
p.dec = (*Buffer).dec_slice_string
|
||||
p.size = size_slice_string
|
||||
case reflect.Ptr:
|
||||
switch t3 := t2.Elem(); t3.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
|
||||
break
|
||||
case reflect.Struct:
|
||||
p.stype = t2.Elem()
|
||||
p.isMarshaler = isMarshaler(t2)
|
||||
p.isUnmarshaler = isUnmarshaler(t2)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_slice_struct_message
|
||||
p.dec = (*Buffer).dec_slice_struct_message
|
||||
p.size = size_slice_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_struct_group
|
||||
p.dec = (*Buffer).dec_slice_struct_group
|
||||
p.size = size_slice_struct_group
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
switch t2.Elem().Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
|
||||
break
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_slice_byte
|
||||
p.size = size_slice_slice_byte
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// precalculate tag code
|
||||
wire := p.WireType
|
||||
if p.Packed {
|
||||
wire = WireBytes
|
||||
}
|
||||
x := uint32(p.Tag)<<3 | uint32(wire)
|
||||
i := 0
|
||||
for i = 0; x > 127; i++ {
|
||||
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
p.tagbuf[i] = uint8(x)
|
||||
p.tagcode = p.tagbuf[0 : i+1]
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// isMarshaler reports whether type t implements Marshaler.
|
||||
func isMarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isMarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isMarshaler")
|
||||
}
|
||||
return t.Implements(marshalerType)
|
||||
}
|
||||
|
||||
// isUnmarshaler reports whether type t implements Unmarshaler.
|
||||
func isUnmarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isUnmarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isUnmarshaler")
|
||||
}
|
||||
return t.Implements(unmarshalerType)
|
||||
}
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if f != nil {
|
||||
p.field = toField(f)
|
||||
}
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setEncAndDec(typ, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
mutex sync.Mutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
}
|
||||
mutex.Lock()
|
||||
sprop := getPropertiesLocked(t)
|
||||
mutex.Unlock()
|
||||
return sprop
|
||||
}
|
||||
|
||||
// getPropertiesLocked requires that mutex is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return prop
|
||||
}
|
||||
if collectStats {
|
||||
stats.Cmiss++
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
|
||||
prop.unrecField = invalidField
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
if f.Name == "XXX_extensions" { // special case
|
||||
p.enc = (*Buffer).enc_map
|
||||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
}
|
||||
if f.Name == "XXX_unrecognized" { // special case
|
||||
prop.unrecField = toField(&f)
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
|
||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// Return the Properties object for the x[0]'th field of the structure.
|
||||
func propByIndex(t reflect.Type, x []int) *Properties {
|
||||
if len(x) != 1 {
|
||||
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
|
||||
return nil
|
||||
}
|
||||
prop := GetProperties(t)
|
||||
return prop.Prop[x[0]]
|
||||
}
|
||||
|
||||
// Get the address and type of a pointer to a struct from an interface.
|
||||
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
|
||||
if pb == nil {
|
||||
err = ErrNil
|
||||
return
|
||||
}
|
||||
// get the reflect type of the pointer to the struct.
|
||||
t = reflect.TypeOf(pb)
|
||||
// get the address of the struct.
|
||||
value := reflect.ValueOf(pb)
|
||||
b = toStructPointer(value)
|
||||
return
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
63
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// http://code.google.com/p/goprotobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
	"testing"
)

// This is a separate file and package from size_test.go because that one uses
// generated messages and thus may not be in package proto without having a circular
// dependency, whereas this file tests unexported details of size.go.

func TestVarintSize(t *testing.T) {
	// Check the edge cases carefully.
	testCases := []struct {
		n    uint64
		size int
	}{
		{0, 1},
		{1, 1},
		{127, 1},
		{128, 2},
		{16383, 2},
		{16384, 3},
		{1<<63 - 1, 9},
		{1 << 63, 10},
	}
	for _, tc := range testCases {
		size := sizeVarint(tc.n)
		if size != tc.size {
			t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
		}
	}
}
120
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// http://code.google.com/p/goprotobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto_test

import (
	"log"
	"testing"

	pb "./testdata"
	. "code.google.com/p/goprotobuf/proto"
)

var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}

// messageWithExtension2 is in equal_test.go.
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}

func init() {
	if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
		log.Panicf("SetExtension: %v", err)
	}
	if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
		log.Panicf("SetExtension: %v", err)
	}

	// Force messageWithExtension3 to have the extension encoded.
	Marshal(messageWithExtension3)

}

var SizeTests = []struct {
	desc string
	pb   Message
}{
	{"empty", &pb.OtherMessage{}},
	// Basic types.
	{"bool", &pb.Defaults{F_Bool: Bool(true)}},
	{"int32", &pb.Defaults{F_Int32: Int32(12)}},
	{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
	{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
	{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
	{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
	{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
	{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
	{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
	{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
	{"float", &pb.Defaults{F_Float: Float32(12.6)}},
	{"double", &pb.Defaults{F_Double: Float64(13.9)}},
	{"string", &pb.Defaults{F_String: String("niles")}},
	{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
	{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
	{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
	{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
	{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
	// Repeated.
	{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
	{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
	{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
	{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
	{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
	{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
		// Need enough large numbers to verify that the header is counting the number of bytes
		// for the field, not the number of elements.
		1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
		1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
	}}},
	{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
	{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
	// Nested.
	{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
	{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
	// Other things.
	{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
	{"extension (unencoded)", messageWithExtension1},
	{"extension (encoded)", messageWithExtension3},
}

func TestSize(t *testing.T) {
	for _, tc := range SizeTests {
		size := Size(tc.pb)
		b, err := Marshal(tc.pb)
		if err != nil {
			t.Errorf("%v: Marshal failed: %v", tc.desc, err)
			continue
		}
		if size != len(b) {
			t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
			t.Logf("%v: bytes: %#v", tc.desc, b)
		}
	}
}
50
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile
generated
vendored
Normal file
@@ -0,0 +1,50 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# http://code.google.com/p/goprotobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


include ../../Make.protobuf

all: regenerate

regenerate:
	rm -f test.pb.go
	make test.pb.go

# The following rules are just aids to development. Not needed for typical testing.

diff: regenerate
	hg diff test.pb.go

restore:
	cp test.pb.go.golden test.pb.go

preserve:
	cp test.pb.go test.pb.go.golden
86
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// http://code.google.com/p/goprotobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Verify that the compiler output for test.proto is unchanged.

package testdata

import (
	"crypto/sha1"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"testing"
)

// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
func sum(t *testing.T, name string) string {
	data, err := ioutil.ReadFile(name)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("sum(%q): length is %d", name, len(data))
	hash := sha1.New()
	_, err = hash.Write(data)
	if err != nil {
		t.Fatal(err)
	}
	return fmt.Sprintf("% x", hash.Sum(nil))
}

func run(t *testing.T, name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
	if err != nil {
		t.Fatal(err)
	}
}

func TestGolden(t *testing.T) {
	// Compute the original checksum.
	goldenSum := sum(t, "test.pb.go")
	// Run the proto compiler.
	run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
	newFile := filepath.Join(os.TempDir(), "test.pb.go")
	defer os.Remove(newFile)
	// Compute the new checksum.
	newSum := sum(t, newFile)
	// Verify
	if newSum != goldenSum {
		run(t, "diff", "-u", "test.pb.go", newFile)
		t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
	}
}
2356
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
428
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto
generated
vendored
Normal file
@@ -0,0 +1,428 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// A feature-rich test file for the protocol compiler and libraries.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package testdata;
|
||||
|
||||
enum FOO { FOO1 = 1; };
|
||||
|
||||
message GoEnum {
|
||||
required FOO foo = 1;
|
||||
}
|
||||
|
||||
message GoTestField {
|
||||
required string Label = 1;
|
||||
required string Type = 2;
|
||||
}
|
||||
|
||||
message GoTest {
|
||||
// An enum, for completeness.
|
||||
enum KIND {
|
||||
VOID = 0;
|
||||
|
||||
// Basic types
|
||||
BOOL = 1;
|
||||
BYTES = 2;
|
||||
FINGERPRINT = 3;
|
||||
FLOAT = 4;
|
||||
INT = 5;
|
||||
STRING = 6;
|
||||
TIME = 7;
|
||||
|
||||
// Groupings
|
||||
TUPLE = 8;
|
||||
ARRAY = 9;
|
||||
MAP = 10;
|
||||
|
||||
// Table types
|
||||
TABLE = 11;
|
||||
|
||||
// Functions
|
||||
FUNCTION = 12; // last tag
|
||||
};
|
||||
|
||||
// Some typical parameters
|
||||
required KIND Kind = 1;
|
||||
optional string Table = 2;
|
||||
optional int32 Param = 3;
|
||||
|
||||
// Required, repeated and optional foreign fields.
|
||||
required GoTestField RequiredField = 4;
|
||||
repeated GoTestField RepeatedField = 5;
|
||||
optional GoTestField OptionalField = 6;
|
||||
|
||||
// Required fields of all basic types
|
||||
required bool F_Bool_required = 10;
|
||||
required int32 F_Int32_required = 11;
|
||||
required int64 F_Int64_required = 12;
|
||||
required fixed32 F_Fixed32_required = 13;
|
||||
required fixed64 F_Fixed64_required = 14;
|
||||
required uint32 F_Uint32_required = 15;
|
||||
required uint64 F_Uint64_required = 16;
|
||||
required float F_Float_required = 17;
|
||||
required double F_Double_required = 18;
|
||||
required string F_String_required = 19;
|
||||
required bytes F_Bytes_required = 101;
|
||||
required sint32 F_Sint32_required = 102;
|
||||
required sint64 F_Sint64_required = 103;
|
||||
|
||||
// Repeated fields of all basic types
|
||||
repeated bool F_Bool_repeated = 20;
|
||||
repeated int32 F_Int32_repeated = 21;
|
||||
repeated int64 F_Int64_repeated = 22;
|
||||
repeated fixed32 F_Fixed32_repeated = 23;
|
||||
repeated fixed64 F_Fixed64_repeated = 24;
|
||||
repeated uint32 F_Uint32_repeated = 25;
|
||||
repeated uint64 F_Uint64_repeated = 26;
|
||||
repeated float F_Float_repeated = 27;
|
||||
repeated double F_Double_repeated = 28;
|
||||
repeated string F_String_repeated = 29;
|
||||
repeated bytes F_Bytes_repeated = 201;
|
||||
repeated sint32 F_Sint32_repeated = 202;
|
||||
repeated sint64 F_Sint64_repeated = 203;
|
||||
|
||||
// Optional fields of all basic types
|
||||
optional bool F_Bool_optional = 30;
|
||||
optional int32 F_Int32_optional = 31;
|
||||
optional int64 F_Int64_optional = 32;
|
||||
optional fixed32 F_Fixed32_optional = 33;
|
||||
optional fixed64 F_Fixed64_optional = 34;
|
||||
optional uint32 F_Uint32_optional = 35;
|
||||
optional uint64 F_Uint64_optional = 36;
|
||||
optional float F_Float_optional = 37;
|
||||
optional double F_Double_optional = 38;
|
||||
optional string F_String_optional = 39;
|
||||
optional bytes F_Bytes_optional = 301;
|
||||
optional sint32 F_Sint32_optional = 302;
|
||||
optional sint64 F_Sint64_optional = 303;
|
||||
|
||||
// Default-valued fields of all basic types
|
||||
optional bool F_Bool_defaulted = 40 [default=true];
|
||||
optional int32 F_Int32_defaulted = 41 [default=32];
|
||||
optional int64 F_Int64_defaulted = 42 [default=64];
|
||||
optional fixed32 F_Fixed32_defaulted = 43 [default=320];
|
||||
optional fixed64 F_Fixed64_defaulted = 44 [default=640];
|
||||
optional uint32 F_Uint32_defaulted = 45 [default=3200];
|
||||
optional uint64 F_Uint64_defaulted = 46 [default=6400];
|
||||
optional float F_Float_defaulted = 47 [default=314159.];
|
||||
optional double F_Double_defaulted = 48 [default=271828.];
|
||||
optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
|
||||
optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
|
||||
optional sint32 F_Sint32_defaulted = 402 [default = -32];
|
||||
optional sint64 F_Sint64_defaulted = 403 [default = -64];
|
||||
|
||||
// Packed repeated fields (no string or bytes).
|
||||
repeated bool F_Bool_repeated_packed = 50 [packed=true];
|
||||
repeated int32 F_Int32_repeated_packed = 51 [packed=true];
|
||||
repeated int64 F_Int64_repeated_packed = 52 [packed=true];
|
||||
repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
|
||||
repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
|
||||
repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
|
||||
repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
|
||||
repeated float F_Float_repeated_packed = 57 [packed=true];
|
||||
repeated double F_Double_repeated_packed = 58 [packed=true];
|
||||
repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
|
||||
repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
|
||||
|
||||
// Required, repeated, and optional groups.
|
||||
required group RequiredGroup = 70 {
|
||||
required string RequiredField = 71;
|
||||
};
|
||||
|
||||
repeated group RepeatedGroup = 80 {
|
||||
required string RequiredField = 81;
|
||||
};
|
||||
|
||||
optional group OptionalGroup = 90 {
|
||||
required string RequiredField = 91;
|
||||
};
|
||||
}
|
||||
|
||||
// For testing skipping of unrecognized fields.
|
||||
// Numbers are all big, larger than tag numbers in GoTestField,
|
||||
// the message used in the corresponding test.
|
||||
message GoSkipTest {
|
||||
required int32 skip_int32 = 11;
|
||||
required fixed32 skip_fixed32 = 12;
|
||||
required fixed64 skip_fixed64 = 13;
|
||||
required string skip_string = 14;
|
||||
required group SkipGroup = 15 {
|
||||
required int32 group_int32 = 16;
|
||||
required string group_string = 17;
|
||||
}
|
||||
}
|
||||
|
||||
// For testing packed/non-packed decoder switching.
|
||||
// A serialized instance of one should be deserializable as the other.
|
||||
message NonPackedTest {
|
||||
repeated int32 a = 1;
|
||||
}
|
||||
|
||||
message PackedTest {
|
||||
repeated int32 b = 1 [packed=true];
|
||||
}
|
||||
|
||||
message MaxTag {
|
||||
// Maximum possible tag number.
|
||||
optional string last_field = 536870911;
|
||||
}
|
||||
|
||||
message OldMessage {
|
||||
message Nested {
|
||||
optional string name = 1;
|
||||
}
|
||||
optional Nested nested = 1;
|
||||
|
||||
optional int32 num = 2;
|
||||
}
|
||||
|
||||
// NewMessage is wire compatible with OldMessage;
|
||||
// imagine it as a future version.
|
||||
message NewMessage {
|
||||
message Nested {
|
||||
optional string name = 1;
|
||||
optional string food_group = 2;
|
||||
}
|
||||
optional Nested nested = 1;
|
||||
|
||||
// This is an int32 in OldMessage.
|
||||
optional int64 num = 2;
|
||||
}
|
||||
|
||||
// Smaller tests for ASCII formatting.
|
||||
|
||||
message InnerMessage {
|
||||
required string host = 1;
|
||||
optional int32 port = 2 [default=4000];
|
||||
optional bool connected = 3;
|
||||
}
|
||||
|
||||
message OtherMessage {
|
||||
optional int64 key = 1;
|
||||
optional bytes value = 2;
|
||||
optional float weight = 3;
|
||||
optional InnerMessage inner = 4;
|
||||
}
|
||||
|
||||
message MyMessage {
|
||||
required int32 count = 1;
|
||||
optional string name = 2;
|
||||
optional string quote = 3;
|
||||
repeated string pet = 4;
|
||||
optional InnerMessage inner = 5;
|
||||
repeated OtherMessage others = 6;
|
||||
repeated InnerMessage rep_inner = 12;
|
||||
|
||||
enum Color {
|
||||
RED = 0;
|
||||
GREEN = 1;
|
||||
BLUE = 2;
|
||||
};
|
||||
optional Color bikeshed = 7;
|
||||
|
||||
optional group SomeGroup = 8 {
|
||||
optional int32 group_field = 9;
|
||||
}
|
||||
|
||||
// This field becomes [][]byte in the generated code.
|
||||
repeated bytes rep_bytes = 10;
|
||||
|
||||
optional double bigfloat = 11;
|
||||
|
||||
extensions 100 to max;
|
||||
}
|
||||
|
||||
message Ext {
|
||||
extend MyMessage {
|
||||
optional Ext more = 103;
|
||||
optional string text = 104;
|
||||
optional int32 number = 105;
|
||||
}
|
||||
|
||||
optional string data = 1;
|
||||
}
|
||||
|
||||
extend MyMessage {
|
||||
repeated string greeting = 106;
|
||||
}
|
||||
|
||||
message MyMessageSet {
|
||||
option message_set_wire_format = true;
|
||||
extensions 100 to max;
|
||||
}
|
||||
|
||||
message Empty {
|
||||
}
|
||||
|
||||
extend MyMessageSet {
|
||||
optional Empty x201 = 201;
|
||||
optional Empty x202 = 202;
|
||||
optional Empty x203 = 203;
|
||||
optional Empty x204 = 204;
|
||||
optional Empty x205 = 205;
|
||||
optional Empty x206 = 206;
|
||||
optional Empty x207 = 207;
|
||||
optional Empty x208 = 208;
|
||||
optional Empty x209 = 209;
|
||||
optional Empty x210 = 210;
|
||||
optional Empty x211 = 211;
|
||||
optional Empty x212 = 212;
|
||||
optional Empty x213 = 213;
|
||||
optional Empty x214 = 214;
|
||||
optional Empty x215 = 215;
|
||||
optional Empty x216 = 216;
|
||||
optional Empty x217 = 217;
|
||||
optional Empty x218 = 218;
|
||||
optional Empty x219 = 219;
|
||||
optional Empty x220 = 220;
|
||||
optional Empty x221 = 221;
|
||||
optional Empty x222 = 222;
|
||||
optional Empty x223 = 223;
|
||||
optional Empty x224 = 224;
|
||||
optional Empty x225 = 225;
|
||||
optional Empty x226 = 226;
|
||||
optional Empty x227 = 227;
|
||||
optional Empty x228 = 228;
|
||||
optional Empty x229 = 229;
|
||||
optional Empty x230 = 230;
|
||||
optional Empty x231 = 231;
|
||||
optional Empty x232 = 232;
|
||||
optional Empty x233 = 233;
|
||||
optional Empty x234 = 234;
|
||||
optional Empty x235 = 235;
|
||||
optional Empty x236 = 236;
|
||||
optional Empty x237 = 237;
|
||||
optional Empty x238 = 238;
|
||||
optional Empty x239 = 239;
|
||||
optional Empty x240 = 240;
|
||||
optional Empty x241 = 241;
|
||||
optional Empty x242 = 242;
|
||||
optional Empty x243 = 243;
|
||||
optional Empty x244 = 244;
|
||||
optional Empty x245 = 245;
|
||||
optional Empty x246 = 246;
|
||||
optional Empty x247 = 247;
|
||||
optional Empty x248 = 248;
|
||||
optional Empty x249 = 249;
|
||||
optional Empty x250 = 250;
|
||||
}
|
||||
|
||||
message MessageList {
|
||||
repeated group Message = 1 {
|
||||
required string name = 2;
|
||||
required int32 count = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message Strings {
|
||||
optional string string_field = 1;
|
||||
optional bytes bytes_field = 2;
|
||||
}
|
||||
|
||||
message Defaults {
|
||||
enum Color {
|
||||
RED = 0;
|
||||
GREEN = 1;
|
||||
BLUE = 2;
|
||||
}
|
||||
|
||||
// Default-valued fields of all basic types.
|
||||
// Same as GoTest, but copied here to make testing easier.
|
||||
optional bool F_Bool = 1 [default=true];
|
||||
optional int32 F_Int32 = 2 [default=32];
|
||||
optional int64 F_Int64 = 3 [default=64];
|
||||
optional fixed32 F_Fixed32 = 4 [default=320];
|
||||
optional fixed64 F_Fixed64 = 5 [default=640];
|
||||
optional uint32 F_Uint32 = 6 [default=3200];
|
||||
optional uint64 F_Uint64 = 7 [default=6400];
|
||||
optional float F_Float = 8 [default=314159.];
|
||||
optional double F_Double = 9 [default=271828.];
|
||||
optional string F_String = 10 [default="hello, \"world!\"\n"];
|
||||
optional bytes F_Bytes = 11 [default="Bignose"];
|
||||
optional sint32 F_Sint32 = 12 [default=-32];
|
||||
optional sint64 F_Sint64 = 13 [default=-64];
|
||||
optional Color F_Enum = 14 [default=GREEN];
|
||||
|
||||
// More fields with crazy defaults.
|
||||
optional float F_Pinf = 15 [default=inf];
|
||||
optional float F_Ninf = 16 [default=-inf];
|
||||
optional float F_Nan = 17 [default=nan];
|
||||
|
||||
// Sub-message.
|
||||
optional SubDefaults sub = 18;
|
||||
|
||||
// Redundant but explicit defaults.
|
||||
optional string str_zero = 19 [default=""];
|
||||
}
|
||||
|
||||
message SubDefaults {
|
||||
optional int64 n = 1 [default=7];
|
||||
}
|
||||
|
||||
message RepeatedEnum {
|
||||
enum Color {
|
||||
RED = 1;
|
||||
}
|
||||
repeated Color color = 1;
|
||||
}
|
||||
|
||||
message MoreRepeated {
|
||||
repeated bool bools = 1;
|
||||
repeated bool bools_packed = 2 [packed=true];
|
||||
repeated int32 ints = 3;
|
||||
repeated int32 ints_packed = 4 [packed=true];
|
||||
repeated int64 int64s_packed = 7 [packed=true];
|
||||
repeated string strings = 5;
|
||||
repeated fixed32 fixeds = 6;
|
||||
}
|
||||
|
||||
// GroupOld and GroupNew have the same wire format.
|
||||
// GroupNew has a new field inside a group.
|
||||
|
||||
message GroupOld {
|
||||
optional group G = 101 {
|
||||
optional int32 x = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message GroupNew {
|
||||
optional group G = 101 {
|
||||
optional int32 x = 2;
|
||||
optional int32 y = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message FloatingPoint {
|
||||
required double f = 1;
|
||||
}
|
||||
695
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go
generated
vendored
Normal file
@@ -0,0 +1,695 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
gtNewline = []byte(">\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Printf("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
|
||||
)
|
||||
|
||||
// raw is the interface satisfied by RawMessage.
|
||||
type raw interface {
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if sv.Type() == messageSetType {
|
||||
return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
|
||||
}
|
||||
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if b, ok := fv.Interface().(raw); ok {
|
||||
if err := writeRaw(w, b.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if pv.Type().Implements(extendableProtoType) {
|
||||
if err := writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeRaw writes an uninterpreted raw message.
|
||||
func writeRaw(w *textWriter, b []byte) error {
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
|
||||
func isprint(c byte) bool {
|
||||
return c >= 0x20 && c < 0x7f
|
||||
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
|
||||
|
||||
func writeMessageSet(w *textWriter, ms *MessageSet) error {
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
if msd, ok := messageSetMap[id]; ok {
|
||||
// Known message set type.
|
||||
if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
|
||||
return err
|
||||
}
|
||||
w.indent()
|
||||
|
||||
pb := reflect.New(msd.t.Elem())
|
||||
if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
|
||||
if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := writeStruct(w, pb.Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Unknown type.
|
||||
if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
|
||||
return err
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, item.Message); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if _, err := w.Write(gtNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||
if err == nil {
|
||||
_, err = fmt.Fprint(w, x)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type int32Slice []int32
|
||||
|
||||
func (s int32Slice) Len() int { return len(s) }
|
||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep := pv.Interface().(extendableProto)
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m := ep.ExtensionMap()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
remain := w.ind * 2
|
||||
for remain > 0 {
|
||||
n := remain
|
||||
if n > len(spaces) {
|
||||
n = len(spaces)
|
||||
}
|
||||
w.w.Write(spaces[:n])
|
||||
remain -= n
|
||||
}
|
||||
w.complete = false
|
||||
}
|
||||
|
||||
func marshalText(w io.Writer, pb Message, compact bool) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: compact,
|
||||
}
|
||||
|
||||
if tm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error {
|
||||
return marshalText(w, pb, false)
|
||||
}
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
marshalText(&buf, pb, false)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
|
||||
|
||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||
func CompactTextString(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
marshalText(&buf, pb, true)
|
||||
return buf.String()
|
||||
}
|
||||
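text.go above implements the writer side of the text format (MarshalText, MarshalTextString, CompactText, CompactTextString). A hedged usage sketch, assuming the vendored import path code.google.com/p/goprotobuf/proto and the generated testdata package from test.proto earlier in this diff:

package main

import (
	"fmt"
	"os"

	"code.google.com/p/goprotobuf/proto"
	pb "code.google.com/p/goprotobuf/proto/testdata" // assumed: generated from test.proto above
)

func main() {
	m := &pb.InnerMessage{
		Host:      proto.String("example.com"),
		Port:      proto.Int32(8080),
		Connected: proto.Bool(true),
	}
	// Multi-line text form written to stdout.
	if err := proto.MarshalText(os.Stdout, m); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	// One-line (compact) text form as a string.
	fmt.Println(proto.CompactTextString(m))
}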
687
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go
generated
vendored
Normal file
@@ -0,0 +1,687 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %v", p.s[0:i+1])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
errBadHex = errors.New("proto: bad hexadecimal")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
base := 8
|
||||
ss := s[:2]
|
||||
s = s[2:]
|
||||
if r == 'x' || r == 'X' {
|
||||
base = 16
|
||||
} else {
|
||||
ss = string(r) + ss
|
||||
}
|
||||
i, err := strconv.ParseUint(ss, base, 8)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'u', 'U':
|
||||
n := 4
|
||||
if r == 'U' {
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
|
||||
}
|
||||
|
||||
bs := make([]byte, n/2)
|
||||
for i := 0; i < n; i += 2 {
|
||||
a, ok1 := unhex(s[i])
|
||||
b, ok2 := unhex(s[i+1])
|
||||
if !ok1 || !ok2 {
|
||||
return "", "", errBadHex
|
||||
}
|
||||
bs[i/2] = a<<4 | b
|
||||
}
|
||||
s = s[n:]
|
||||
return string(bs), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Adapted from src/pkg/strconv/quote.go.
|
||||
func unhex(b byte) (v byte, ok bool) {
|
||||
switch {
|
||||
case '0' <= b && b <= '9':
|
||||
return b - '0', true
|
||||
case 'a' <= b && b <= 'f':
|
||||
return b - 'a' + 10, true
|
||||
case 'A' <= b && b <= 'F':
|
||||
return b - 'A' + 10, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || p.s[0] != '"' {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
|
||||
sprops := GetProperties(st)
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
reqCount := GetProperties(st).reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]".
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == tok.value {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", tok.value)
|
||||
}
|
||||
// Check the extension terminator.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != "]" {
|
||||
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(extendableProto)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
} else {
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
fi, props, ok := structFieldByName(st, name)
|
||||
if !ok {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
dst := sv.Field(fi)
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
} else if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
}
|
||||
|
||||
// For backward compatibility, permit a semicolon or comma after a field.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field. May already exist.
|
||||
flen := fv.Len()
|
||||
if flen == fv.Cap() {
|
||||
nav := reflect.MakeSlice(at, flen, 2*flen+1)
|
||||
reflect.Copy(nav, fv)
|
||||
fv.Set(nav)
|
||||
}
|
||||
fv.SetLen(flen + 1)
|
||||
|
||||
// Read one.
|
||||
p.back()
|
||||
return p.readAny(fv.Index(flen), props)
|
||||
case reflect.Bool:
|
||||
// Either "true", "false", 1 or 0.
|
||||
switch tok.value {
|
||||
case "true", "1":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
err := um.UnmarshalText([]byte(s))
|
||||
return err
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
||||
return pe
|
||||
}
|
||||
return nil
|
||||
}
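As a quick orientation for the parser above, here is a minimal, hypothetical sketch (not part of the vendored code) of parsing a text-format message and re-emitting it, assuming the generated testdata types (MyMessage and friends) that the tests below rely on:

package main

import (
	"fmt"

	"code.google.com/p/goprotobuf/proto"
	pb "code.google.com/p/goprotobuf/proto/testdata"
)

func main() {
	// Parse a text-format message into a generated struct.
	msg := new(pb.MyMessage)
	if err := proto.UnmarshalText(`count: 42 name: "Dave"`, msg); err != nil {
		panic(err)
	}

	// Re-emit it in the compact, single-line text form.
	fmt.Println(proto.CompactTextString(msg))
}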
|
||||
468 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go generated vendored Normal file
@@ -0,0 +1,468 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
. "./testdata"
|
||||
. "code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
type UnmarshalTextTest struct {
|
||||
in string
|
||||
err string // if "", no error expected
|
||||
out *MyMessage
|
||||
}
|
||||
|
||||
func buildExtStructTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
SetExtension(msg, E_Ext_More, &Ext{
|
||||
Data: String("Hello, world!"),
|
||||
})
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
func buildExtDataTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
|
||||
SetExtension(msg, E_Ext_Number, Int32(1729))
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
func buildExtRepStringTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
var unMarshalTextTests = []UnmarshalTextTest{
|
||||
// Basic
|
||||
{
|
||||
in: " count:42\n name:\"Dave\" ",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
},
|
||||
},
|
||||
|
||||
// Empty quoted string
|
||||
{
|
||||
in: `count:42 name:""`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(""),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string concatenation
|
||||
{
|
||||
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with escaped apostrophe
|
||||
{
|
||||
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("HOLIDAY - New Year's Day"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with single quote
|
||||
{
|
||||
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`Roger "The Ramster" Ramjet`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with all the accepted special characters from the C++ test
|
||||
{
|
||||
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with quoted backslash
|
||||
{
|
||||
in: `count:42 name: "\\'xyz"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`\'xyz`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with UTF-8 bytes.
|
||||
{
|
||||
in: "count:42 name: '\303\277\302\201\xAB'",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\303\277\302\201\xAB"),
|
||||
},
|
||||
},
|
||||
|
||||
// Bad quoted string
|
||||
{
|
||||
in: `inner: < host: "\0" >` + "\n",
|
||||
err: `line 1.15: invalid quoted string "\0"`,
|
||||
},
|
||||
|
||||
// Number too large for int64
|
||||
{
|
||||
in: "count: 1 others { key: 123456789012345678901 }",
|
||||
err: "line 1.23: invalid int64: 123456789012345678901",
|
||||
},
|
||||
|
||||
// Number too large for int32
|
||||
{
|
||||
in: "count: 1234567890123",
|
||||
err: "line 1.7: invalid int32: 1234567890123",
|
||||
},
|
||||
|
||||
// Number in hexadecimal
|
||||
{
|
||||
in: "count: 0x2beef",
|
||||
out: &MyMessage{
|
||||
Count: Int32(0x2beef),
|
||||
},
|
||||
},
|
||||
|
||||
// Number in octal
|
||||
{
|
||||
in: "count: 024601",
|
||||
out: &MyMessage{
|
||||
Count: Int32(024601),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point number with "f" suffix
|
||||
{
|
||||
in: "count: 4 others:< weight: 17.0f >",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Weight: Float32(17),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point positive infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point negative infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: -inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Number too large for float32
|
||||
{
|
||||
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
|
||||
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
|
||||
},
|
||||
|
||||
// Number posing as a quoted string
|
||||
{
|
||||
in: `inner: < host: 12 >` + "\n",
|
||||
err: `line 1.15: invalid string: 12`,
|
||||
},
|
||||
|
||||
// Quoted string posing as int32
|
||||
{
|
||||
in: `count: "12"`,
|
||||
err: `line 1.7: invalid int32: "12"`,
|
||||
},
|
||||
|
||||
// Quoted string posing as a float32
|
||||
{
|
||||
in: `others:< weight: "17.4" >`,
|
||||
err: `line 1.17: invalid float32: "17.4"`,
|
||||
},
|
||||
|
||||
// Enum
|
||||
{
|
||||
in: `count:42 bikeshed: BLUE`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Bikeshed: MyMessage_BLUE.Enum(),
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated field
|
||||
{
|
||||
in: `count:42 pet: "horsey" pet:"bunny"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Pet: []string{"horsey", "bunny"},
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated message with/without colon and <>/{}
|
||||
{
|
||||
in: `count:42 others:{} others{} others:<> others:{}`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Others: []*OtherMessage{
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Missing colon for inner message
|
||||
{
|
||||
in: `count:42 inner < host: "cauchy.syd" >`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("cauchy.syd"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Missing colon for string field
|
||||
{
|
||||
in: `name "Dave"`,
|
||||
err: `line 1.5: expected ':', found "\"Dave\""`,
|
||||
},
|
||||
|
||||
// Missing colon for int32 field
|
||||
{
|
||||
in: `count 42`,
|
||||
err: `line 1.6: expected ':', found "42"`,
|
||||
},
|
||||
|
||||
// Missing required field
|
||||
{
|
||||
in: `name: "Pawel"`,
|
||||
err: `proto: required field "testdata.MyMessage.count" not set`,
|
||||
out: &MyMessage{
|
||||
Name: String("Pawel"),
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated non-repeated field
|
||||
{
|
||||
in: `name: "Rob" name: "Russ"`,
|
||||
err: `line 1.12: non-repeated field "name" was repeated`,
|
||||
},
|
||||
|
||||
// Group
|
||||
{
|
||||
in: `count: 17 SomeGroup { group_field: 12 }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(17),
|
||||
Somegroup: &MyMessage_SomeGroup{
|
||||
GroupField: Int32(12),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Semicolon between fields
|
||||
{
|
||||
in: `count:3;name:"Calvin"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(3),
|
||||
Name: String("Calvin"),
|
||||
},
|
||||
},
|
||||
// Comma between fields
|
||||
{
|
||||
in: `count:4,name:"Ezekiel"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Name: String("Ezekiel"),
|
||||
},
|
||||
},
|
||||
|
||||
// Extension
|
||||
buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
|
||||
buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
|
||||
buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
|
||||
buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
|
||||
|
||||
// Big all-in-one
|
||||
{
|
||||
in: "count:42 # Meaning\n" +
|
||||
`name:"Dave" ` +
|
||||
`quote:"\"I didn't want to go.\"" ` +
|
||||
`pet:"bunny" ` +
|
||||
`pet:"kitty" ` +
|
||||
`pet:"horsey" ` +
|
||||
`inner:<` +
|
||||
` host:"footrest.syd" ` +
|
||||
` port:7001 ` +
|
||||
` connected:true ` +
|
||||
`> ` +
|
||||
`others:<` +
|
||||
` key:3735928559 ` +
|
||||
` value:"\x01A\a\f" ` +
|
||||
`> ` +
|
||||
`others:<` +
|
||||
" weight:58.9 # Atomic weight of Co\n" +
|
||||
` inner:<` +
|
||||
` host:"lesha.mtv" ` +
|
||||
` port:8002 ` +
|
||||
` >` +
|
||||
`>`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
Quote: String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &InnerMessage{
|
||||
Host: String("footrest.syd"),
|
||||
Port: Int32(7001),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Key: Int64(3735928559),
|
||||
Value: []byte{0x1, 'A', '\a', '\f'},
|
||||
},
|
||||
{
|
||||
Weight: Float32(58.9),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("lesha.mtv"),
|
||||
Port: Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestUnmarshalText(t *testing.T) {
|
||||
for i, test := range unMarshalTextTests {
|
||||
pb := new(MyMessage)
|
||||
err := UnmarshalText(test.in, pb)
|
||||
if test.err == "" {
|
||||
// We don't expect failure.
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: Unexpected error: %v", i, err)
|
||||
} else if !reflect.DeepEqual(pb, test.out) {
|
||||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
|
||||
i, pb, test.out)
|
||||
}
|
||||
} else {
|
||||
// We do expect failure.
|
||||
if err == nil {
|
||||
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
|
||||
} else if err.Error() != test.err {
|
||||
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
|
||||
i, err.Error(), test.err)
|
||||
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
|
||||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
|
||||
i, pb, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalTextCustomMessage(t *testing.T) {
|
||||
msg := &textMessage{}
|
||||
if err := UnmarshalText("custom", msg); err != nil {
|
||||
t.Errorf("Unexpected error from custom unmarshal: %v", err)
|
||||
}
|
||||
if UnmarshalText("not custom", msg) == nil {
|
||||
t.Errorf("Didn't get expected error from custom unmarshal")
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test; this caused a panic.
|
||||
func TestRepeatedEnum(t *testing.T) {
|
||||
pb := new(RepeatedEnum)
|
||||
if err := UnmarshalText("color: RED", pb); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exp := &RepeatedEnum{
|
||||
Color: []RepeatedEnum_Color{RepeatedEnum_RED},
|
||||
}
|
||||
if !Equal(pb, exp) {
|
||||
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
|
||||
}
|
||||
}
|
||||
|
||||
var benchInput string
|
||||
|
||||
func init() {
|
||||
benchInput = "count: 4\n"
|
||||
for i := 0; i < 1000; i++ {
|
||||
benchInput += "pet: \"fido\"\n"
|
||||
}
|
||||
|
||||
// Check it is valid input.
|
||||
pb := new(MyMessage)
|
||||
err := UnmarshalText(benchInput, pb)
|
||||
if err != nil {
|
||||
panic("Bad benchmark input: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalText(b *testing.B) {
|
||||
pb := new(MyMessage)
|
||||
for i := 0; i < b.N; i++ {
|
||||
UnmarshalText(benchInput, pb)
|
||||
}
|
||||
b.SetBytes(int64(len(benchInput)))
|
||||
}
|
||||
408 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go generated vendored Normal file
@@ -0,0 +1,408 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
|
||||
pb "./testdata"
|
||||
)
|
||||
|
||||
// textMessage implements the methods that allow it to marshal and unmarshal
|
||||
// itself as text.
|
||||
type textMessage struct {
|
||||
}
|
||||
|
||||
func (*textMessage) MarshalText() ([]byte, error) {
|
||||
return []byte("custom"), nil
|
||||
}
|
||||
|
||||
func (*textMessage) UnmarshalText(bytes []byte) error {
|
||||
if string(bytes) != "custom" {
|
||||
return errors.New("expected 'custom'")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*textMessage) Reset() {}
|
||||
func (*textMessage) String() string { return "" }
|
||||
func (*textMessage) ProtoMessage() {}
|
||||
|
||||
func newTestMessage() *pb.MyMessage {
|
||||
msg := &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Quote: proto.String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("footrest.syd"),
|
||||
Port: proto.Int32(7001),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(0xdeadbeef),
|
||||
Value: []byte{1, 65, 7, 12},
|
||||
},
|
||||
{
|
||||
Weight: proto.Float32(6.022),
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("lesha.mtv"),
|
||||
Port: proto.Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
Bikeshed: pb.MyMessage_BLUE.Enum(),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(8),
|
||||
},
|
||||
// One normally wouldn't do this.
|
||||
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
|
||||
XXX_unrecognized: []byte{13<<3 | 0, 4},
|
||||
}
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("Big gobs for big rats"),
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
greetings := []string{"adg", "easy", "cow"}
|
||||
if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
|
||||
b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
|
||||
proto.SetRawExtension(msg, 201, b)
|
||||
|
||||
// Extensions can be plain fields, too, so let's test that.
|
||||
b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
|
||||
proto.SetRawExtension(msg, 202, b)
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
const text = `count: 42
|
||||
name: "Dave"
|
||||
quote: "\"I didn't want to go.\""
|
||||
pet: "bunny"
|
||||
pet: "kitty"
|
||||
pet: "horsey"
|
||||
inner: <
|
||||
host: "footrest.syd"
|
||||
port: 7001
|
||||
connected: true
|
||||
>
|
||||
others: <
|
||||
key: 3735928559
|
||||
value: "\001A\007\014"
|
||||
>
|
||||
others: <
|
||||
weight: 6.022
|
||||
inner: <
|
||||
host: "lesha.mtv"
|
||||
port: 8002
|
||||
>
|
||||
>
|
||||
bikeshed: BLUE
|
||||
SomeGroup {
|
||||
group_field: 8
|
||||
}
|
||||
/* 2 unknown bytes */
|
||||
13: 4
|
||||
[testdata.Ext.more]: <
|
||||
data: "Big gobs for big rats"
|
||||
>
|
||||
[testdata.greeting]: "adg"
|
||||
[testdata.greeting]: "easy"
|
||||
[testdata.greeting]: "cow"
|
||||
/* 13 unknown bytes */
|
||||
201: "\t3G skiing"
|
||||
/* 3 unknown bytes */
|
||||
202: 19
|
||||
`
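The constant above is the expected text rendering of newTestMessage(). As a minimal sketch (again hypothetical, not part of the vendored files), output like it can be streamed with MarshalText or captured as a string with MarshalTextString:

package main

import (
	"os"

	"code.google.com/p/goprotobuf/proto"
	pb "code.google.com/p/goprotobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{
		Count: proto.Int32(42),
		Name:  proto.String("Dave"),
	}

	// Stream the text form to any io.Writer...
	if err := proto.MarshalText(os.Stdout, msg); err != nil {
		panic(err)
	}

	// ...or build it in memory as a string.
	_ = proto.MarshalTextString(msg)
}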
|
||||
|
||||
func TestMarshalText(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, newTestMessage()); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != text {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextCustomMessage(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != "custom" {
|
||||
t.Errorf("Got %q, expected %q", s, "custom")
|
||||
}
|
||||
}
|
||||
func TestMarshalTextNil(t *testing.T) {
|
||||
want := "<nil>"
|
||||
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
|
||||
for i, test := range tests {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, test); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := buf.String(); got != want {
|
||||
t.Errorf("%d: got %q want %q", i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextUnknownEnum(t *testing.T) {
|
||||
// The Color enum only specifies values 0-2.
|
||||
m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
|
||||
got := m.String()
|
||||
const want = `bikeshed:3 `
|
||||
if got != want {
|
||||
t.Errorf("\n got %q\nwant %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalTextBuffered(b *testing.B) {
|
||||
buf := new(bytes.Buffer)
|
||||
m := newTestMessage()
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
proto.MarshalText(buf, m)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalTextUnbuffered(b *testing.B) {
|
||||
w := ioutil.Discard
|
||||
m := newTestMessage()
|
||||
for i := 0; i < b.N; i++ {
|
||||
proto.MarshalText(w, m)
|
||||
}
|
||||
}
|
||||
|
||||
func compact(src string) string {
|
||||
// s/[ \n]+/ /g; s/ $//;
|
||||
dst := make([]byte, len(src))
|
||||
space, comment := false, false
|
||||
j := 0
|
||||
for i := 0; i < len(src); i++ {
|
||||
if strings.HasPrefix(src[i:], "/*") {
|
||||
comment = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if comment && strings.HasPrefix(src[i:], "*/") {
|
||||
comment = false
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if comment {
|
||||
continue
|
||||
}
|
||||
c := src[i]
|
||||
if c == ' ' || c == '\n' {
|
||||
space = true
|
||||
continue
|
||||
}
|
||||
if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
|
||||
space = false
|
||||
}
|
||||
if c == '{' {
|
||||
space = false
|
||||
}
|
||||
if space {
|
||||
dst[j] = ' '
|
||||
j++
|
||||
space = false
|
||||
}
|
||||
dst[j] = c
|
||||
j++
|
||||
}
|
||||
if space {
|
||||
dst[j] = ' '
|
||||
j++
|
||||
}
|
||||
return string(dst[0:j])
|
||||
}
|
||||
|
||||
var compactText = compact(text)
|
||||
|
||||
func TestCompactText(t *testing.T) {
|
||||
s := proto.CompactTextString(newTestMessage())
|
||||
if s != compactText {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEscaping(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in *pb.Strings
|
||||
out string
|
||||
}{
|
||||
{
|
||||
// Test data from C++ test (TextFormatTest.StringEscape).
|
||||
// Single divergence: we don't escape apostrophes.
|
||||
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
|
||||
"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
|
||||
},
|
||||
{
|
||||
// Test data from the same C++ test.
|
||||
&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
|
||||
"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
|
||||
},
|
||||
{
|
||||
// Some UTF-8.
|
||||
&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
|
||||
`string_field: "\000\001\377\201"` + "\n",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
var buf bytes.Buffer
|
||||
if err := proto.MarshalText(&buf, tc.in); err != nil {
|
||||
t.Errorf("proto.MarsalText: %v", err)
|
||||
continue
|
||||
}
|
||||
s := buf.String()
|
||||
if s != tc.out {
|
||||
t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check round-trip.
|
||||
pb := new(pb.Strings)
|
||||
if err := proto.UnmarshalText(s, pb); err != nil {
|
||||
t.Errorf("#%d: UnmarshalText: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !proto.Equal(pb, tc.in) {
|
||||
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A limitedWriter accepts some output before it fails.
|
||||
// This is a proxy for something like a nearly-full or imminently-failing disk,
|
||||
// or a network connection that is about to die.
|
||||
type limitedWriter struct {
|
||||
b bytes.Buffer
|
||||
limit int
|
||||
}
|
||||
|
||||
var outOfSpace = errors.New("proto: insufficient space")
|
||||
|
||||
func (w *limitedWriter) Write(p []byte) (n int, err error) {
|
||||
var avail = w.limit - w.b.Len()
|
||||
if avail <= 0 {
|
||||
return 0, outOfSpace
|
||||
}
|
||||
if len(p) <= avail {
|
||||
return w.b.Write(p)
|
||||
}
|
||||
n, _ = w.b.Write(p[:avail])
|
||||
return n, outOfSpace
|
||||
}
|
||||
|
||||
func TestMarshalTextFailing(t *testing.T) {
|
||||
// Try lots of different sizes to exercise more error code-paths.
|
||||
for lim := 0; lim < len(text); lim++ {
|
||||
buf := new(limitedWriter)
|
||||
buf.limit = lim
|
||||
err := proto.MarshalText(buf, newTestMessage())
|
||||
// We expect a certain error, but also some partial results in the buffer.
|
||||
if err != outOfSpace {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
|
||||
}
|
||||
s := buf.b.String()
|
||||
x := text[:buf.limit]
|
||||
if s != x {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloats(t *testing.T) {
|
||||
tests := []struct {
|
||||
f float64
|
||||
want string
|
||||
}{
|
||||
{0, "0"},
|
||||
{4.7, "4.7"},
|
||||
{math.Inf(1), "inf"},
|
||||
{math.Inf(-1), "-inf"},
|
||||
{math.NaN(), "nan"},
|
||||
}
|
||||
for _, test := range tests {
|
||||
msg := &pb.FloatingPoint{F: &test.f}
|
||||
got := strings.TrimSpace(msg.String())
|
||||
want := `f:` + test.want
|
||||
if got != want {
|
||||
t.Errorf("f=%f: got %q, want %q", test.f, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeatedNilText(t *testing.T) {
|
||||
m := &pb.MessageList{
|
||||
Message: []*pb.MessageList_Message{
|
||||
nil,
|
||||
&pb.MessageList_Message{
|
||||
Name: proto.String("Horse"),
|
||||
},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
want := `Message <nil>
|
||||
Message {
|
||||
name: "Horse"
|
||||
}
|
||||
Message <nil>
|
||||
`
|
||||
if s := proto.MarshalTextString(m); s != want {
|
||||
t.Errorf(" got: %s\nwant: %s", s, want)
|
||||
}
|
||||
}
|
||||
355 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/all_test.go generated vendored Normal file
@@ -0,0 +1,355 @@
|
||||
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ext
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
. "code.google.com/p/goprotobuf/proto"
|
||||
. "code.google.com/p/goprotobuf/proto/testdata"
|
||||
)
|
||||
|
||||
func TestWriteDelimited(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
msg Message
|
||||
buf []byte
|
||||
n int
|
||||
err error
|
||||
}{
|
||||
{
|
||||
msg: &Empty{},
|
||||
n: 1,
|
||||
buf: []byte{0},
|
||||
},
|
||||
{
|
||||
msg: &GoEnum{Foo: FOO_FOO1.Enum()},
|
||||
n: 3,
|
||||
buf: []byte{2, 8, 1},
|
||||
},
|
||||
{
|
||||
msg: &Strings{
|
||||
StringField: String(`This is my gigantic, unhappy string. It exceeds
|
||||
the encoding size of a single byte varint. We are using it to fuzz test the
|
||||
correctness of the header decoding mechanisms, which may prove problematic.
|
||||
I expect it may. Let's hope you enjoy testing as much as we do.`),
|
||||
},
|
||||
n: 271,
|
||||
buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
|
||||
121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
|
||||
97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
|
||||
116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
|
||||
110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
|
||||
32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
|
||||
118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
|
||||
117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
|
||||
122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
|
||||
101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
|
||||
104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
|
||||
32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
|
||||
105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
|
||||
114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
|
||||
112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
|
||||
116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
|
||||
106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
|
||||
109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
|
||||
},
|
||||
} {
|
||||
var buf bytes.Buffer
|
||||
if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err {
|
||||
t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err)
|
||||
}
|
||||
if out := buf.Bytes(); !bytes.Equal(out, test.buf) {
|
||||
t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDelimited(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
buf []byte
|
||||
msg Message
|
||||
n int
|
||||
err error
|
||||
}{
|
||||
{
|
||||
buf: []byte{0},
|
||||
msg: &Empty{},
|
||||
n: 1,
|
||||
},
|
||||
{
|
||||
n: 3,
|
||||
buf: []byte{2, 8, 1},
|
||||
msg: &GoEnum{Foo: FOO_FOO1.Enum()},
|
||||
},
|
||||
{
|
||||
buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
|
||||
121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
|
||||
97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
|
||||
116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
|
||||
110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
|
||||
32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
|
||||
118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
|
||||
117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
|
||||
122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
|
||||
101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
|
||||
104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
|
||||
32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
|
||||
105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
|
||||
114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
|
||||
112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
|
||||
116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
|
||||
106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
|
||||
109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
|
||||
msg: &Strings{
|
||||
StringField: String(`This is my gigantic, unhappy string. It exceeds
|
||||
the encoding size of a single byte varint. We are using it to fuzz test the
|
||||
correctness of the header decoding mechanisms, which may prove problematic.
|
||||
I expect it may. Let's hope you enjoy testing as much as we do.`),
|
||||
},
|
||||
n: 271,
|
||||
},
|
||||
} {
|
||||
msg := Clone(test.msg)
|
||||
msg.Reset()
|
||||
if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err {
|
||||
t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err)
|
||||
}
|
||||
if !Equal(msg, test.msg) {
|
||||
t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEndToEndValid(t *testing.T) {
|
||||
for _, test := range [][]Message{
|
||||
[]Message{&Empty{}},
|
||||
[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
|
||||
[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}},
|
||||
[]Message{&Strings{
|
||||
StringField: String(`This is my gigantic, unhappy string. It exceeds
|
||||
the encoding size of a single byte varint. We are using it to fuzz test the
|
||||
correctness of the header decoding mechanisms, which may prove problematic.
|
||||
I expect it may. Let's hope you enjoy testing as much as we do.`),
|
||||
}},
|
||||
} {
|
||||
var buf bytes.Buffer
|
||||
var written int
|
||||
for i, msg := range test {
|
||||
n, err := WriteDelimited(&buf, msg)
|
||||
if err != nil {
|
||||
// Assumption: TestReadDelimited and TestWriteDelimited are sufficient
|
||||
// and inputs for this test are explicitly exercised there.
|
||||
t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err)
|
||||
}
|
||||
written += n
|
||||
}
|
||||
var read int
|
||||
for i, msg := range test {
|
||||
out := Clone(msg)
|
||||
out.Reset()
|
||||
n, _ := ReadDelimited(&buf, out)
|
||||
// TODO: decide whether EOF should be checked here.
|
||||
read += n
|
||||
if !Equal(out, msg) {
|
||||
t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg)
|
||||
}
|
||||
}
|
||||
if read != written {
|
||||
t.Fatalf("%v read = %d; want %d", test, read, written)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// visitMessage empties the private state fields of the quick.Value()-generated
|
||||
// Protocol Buffer messages, for they cause an inordinate amount of problems.
|
||||
// This is because we are using an automated fuzz generator on a type with
|
||||
// private fields.
|
||||
func visitMessage(m Message) {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() != reflect.Ptr {
|
||||
return
|
||||
}
|
||||
derefed := t.Elem()
|
||||
if derefed.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
v := reflect.ValueOf(m)
|
||||
elem := v.Elem()
|
||||
for i := 0; i < elem.NumField(); i++ {
|
||||
field := elem.FieldByIndex([]int{i})
|
||||
fieldType := field.Type()
|
||||
if fieldType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
|
||||
visitMessage(field.Interface().(Message))
|
||||
}
|
||||
if field.Kind() == reflect.Slice {
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
elem := field.Index(i)
|
||||
elemType := elem.Type()
|
||||
if elemType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
|
||||
visitMessage(elem.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if field := elem.FieldByName("XXX_unrecognized"); field.IsValid() {
|
||||
field.Set(reflect.ValueOf([]byte{}))
|
||||
}
|
||||
if field := elem.FieldByName("XXX_extensions"); field.IsValid() {
|
||||
field.Set(reflect.ValueOf(nil))
|
||||
}
|
||||
}
|
||||
|
||||
// rndMessage generates a random valid Protocol Buffer message.
|
||||
func rndMessage(r *rand.Rand) Message {
|
||||
var t reflect.Type
|
||||
switch v := rand.Intn(23); v {
|
||||
// TODO(br): Uncomment the elements below once fix is incorporated, except
|
||||
// for the elements marked as patently incompatible.
|
||||
// case 0:
|
||||
// t = reflect.TypeOf(&GoEnum{})
|
||||
// break
|
||||
// case 1:
|
||||
// t = reflect.TypeOf(&GoTestField{})
|
||||
// break
|
||||
case 2:
|
||||
t = reflect.TypeOf(&GoTest{})
|
||||
break
|
||||
// case 3:
|
||||
// t = reflect.TypeOf(&GoSkipTest{})
|
||||
// break
|
||||
// case 4:
|
||||
// t = reflect.TypeOf(&NonPackedTest{})
|
||||
// break
|
||||
// case 5:
|
||||
// t = reflect.TypeOf(&PackedTest{})
|
||||
// break
|
||||
case 6:
|
||||
t = reflect.TypeOf(&MaxTag{})
|
||||
break
|
||||
case 7:
|
||||
t = reflect.TypeOf(&OldMessage{})
|
||||
break
|
||||
case 8:
|
||||
t = reflect.TypeOf(&NewMessage{})
|
||||
break
|
||||
case 9:
|
||||
t = reflect.TypeOf(&InnerMessage{})
|
||||
break
|
||||
case 10:
|
||||
t = reflect.TypeOf(&OtherMessage{})
|
||||
break
|
||||
case 11:
|
||||
// PATENTLY INVALID FOR FUZZ GENERATION
|
||||
// t = reflect.TypeOf(&MyMessage{})
|
||||
break
|
||||
// case 12:
|
||||
// t = reflect.TypeOf(&Ext{})
|
||||
// break
|
||||
case 13:
|
||||
// PATENTLY INVALID FOR FUZZ GENERATION
|
||||
// t = reflect.TypeOf(&MyMessageSet{})
|
||||
break
|
||||
// case 14:
|
||||
// t = reflect.TypeOf(&Empty{})
|
||||
// break
|
||||
// case 15:
|
||||
// t = reflect.TypeOf(&MessageList{})
|
||||
// break
|
||||
// case 16:
|
||||
// t = reflect.TypeOf(&Strings{})
|
||||
// break
|
||||
// case 17:
|
||||
// t = reflect.TypeOf(&Defaults{})
|
||||
// break
|
||||
// case 17:
|
||||
// t = reflect.TypeOf(&SubDefaults{})
|
||||
// break
|
||||
// case 18:
|
||||
// t = reflect.TypeOf(&RepeatedEnum{})
|
||||
// break
|
||||
case 19:
|
||||
t = reflect.TypeOf(&MoreRepeated{})
|
||||
break
|
||||
// case 20:
|
||||
// t = reflect.TypeOf(&GroupOld{})
|
||||
// break
|
||||
// case 21:
|
||||
// t = reflect.TypeOf(&GroupNew{})
|
||||
// break
|
||||
case 22:
|
||||
t = reflect.TypeOf(&FloatingPoint{})
|
||||
break
|
||||
default:
|
||||
// TODO(br): Replace with an unreachable once fixed.
|
||||
t = reflect.TypeOf(&GoTest{})
|
||||
break
|
||||
}
|
||||
if t == nil {
|
||||
t = reflect.TypeOf(&GoTest{})
|
||||
}
|
||||
v, ok := quick.Value(t, r)
|
||||
if !ok {
|
||||
panic("attempt to generate illegal item; consult item 11")
|
||||
}
|
||||
visitMessage(v.Interface().(Message))
|
||||
return v.Interface().(Message)
|
||||
}
|
||||
|
||||
// rndMessages generates several random Protocol Buffer messages.
|
||||
func rndMessages(r *rand.Rand) []Message {
|
||||
n := r.Intn(128)
|
||||
out := make([]Message, 0, n)
|
||||
for i := 0; i < n; i++ {
|
||||
out = append(out, rndMessage(r))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestFuzz(t *testing.T) {
|
||||
rnd := rand.New(rand.NewSource(42))
|
||||
check := func() bool {
|
||||
messages := rndMessages(rnd)
|
||||
var buf bytes.Buffer
|
||||
var written int
|
||||
for i, msg := range messages {
|
||||
n, err := WriteDelimited(&buf, msg)
|
||||
if err != nil {
|
||||
t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", messages, i, err)
|
||||
}
|
||||
written += n
|
||||
}
|
||||
var read int
|
||||
for i, msg := range messages {
|
||||
out := Clone(msg)
|
||||
out.Reset()
|
||||
n, _ := ReadDelimited(&buf, out)
|
||||
read += n
|
||||
if !Equal(out, msg) {
|
||||
t.Fatalf("out = %v; want %v[%d] = %#v", out, messages, i, msg)
|
||||
}
|
||||
}
|
||||
if read != written {
|
||||
t.Fatalf("%v read = %d; want %d", messages, read, written)
|
||||
}
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(check, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
75 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/decode.go generated vendored Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ext
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
var errInvalidVarint = errors.New("invalid varint32 encountered")
|
||||
|
||||
// ReadDelimited decodes a message from the provided length-delimited stream,
|
||||
// where the length is encoded as 32-bit varint prefix to the message body.
|
||||
// It returns the total number of bytes read and any applicable error. This is
|
||||
// roughly equivalent to the companion Java API's
|
||||
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
|
||||
// calls r.Read repeatedly as required until exactly one message including its
|
||||
// prefix is read and decoded (or an error has occurred). The function never
|
||||
// reads more bytes from the stream than required. The function never returns
|
||||
// an error if a message has been read and decoded correctly, even if the end
|
||||
// of the stream has been reached in doing so. In that case, any subsequent
|
||||
// calls return (0, io.EOF).
|
||||
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
|
||||
// Per AbstractParser#parsePartialDelimitedFrom with
|
||||
// CodedInputStream#readRawVarint32.
|
||||
headerBuf := make([]byte, binary.MaxVarintLen32)
|
||||
var bytesRead, varIntBytes int
|
||||
var messageLength uint64
|
||||
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
|
||||
if bytesRead >= len(headerBuf) {
|
||||
return bytesRead, errInvalidVarint
|
||||
}
|
||||
// We have to read byte by byte here to avoid reading more bytes
|
||||
// than required. Each read byte is appended to what we have
|
||||
// read before.
|
||||
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
|
||||
if newBytesRead == 0 {
|
||||
if err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
// A Reader should not return (0, nil), but if it does,
|
||||
// it should be treated as no-op (according to the
|
||||
// Reader contract). So let's go on...
|
||||
continue
|
||||
}
|
||||
bytesRead += newBytesRead
|
||||
// Now present everything read so far to the varint decoder and
|
||||
// see if a varint can be decoded already.
|
||||
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
|
||||
}
|
||||
|
||||
messageBuf := make([]byte, messageLength)
|
||||
newBytesRead, err := io.ReadFull(r, messageBuf)
|
||||
bytesRead += newBytesRead
|
||||
if err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
|
||||
return bytesRead, proto.Unmarshal(messageBuf, m)
|
||||
}
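To make the framing concrete, the following is a small sketch (assuming the testdata GoEnum message exercised in the tests above) that builds one record by hand, a uvarint length prefix followed by the marshaled body, and then decodes it with ReadDelimited:

package main

import (
	"bytes"
	"fmt"

	"code.google.com/p/goprotobuf/proto"
	pb "code.google.com/p/goprotobuf/proto/testdata"
	"github.com/matttproud/golang_protobuf_extensions/ext"
)

func main() {
	// Marshal the body and prepend its length as a uvarint.
	body, err := proto.Marshal(&pb.GoEnum{Foo: pb.FOO_FOO1.Enum()})
	if err != nil {
		panic(err)
	}
	frame := append(proto.EncodeVarint(uint64(len(body))), body...)

	// ReadDelimited consumes exactly one frame and reports the bytes read.
	out := new(pb.GoEnum)
	n, err := ext.ReadDelimited(bytes.NewBuffer(frame), out)
	fmt.Println(n, len(frame), err) // expect 3 3 <nil> for this message
}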
|
||||
16 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/doc.go generated vendored Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package ext enables record length-delimited Protocol Buffer streaming.
|
||||
package ext
|
||||
46 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/encode.go generated vendored Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ext
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
// WriteDelimited encodes and dumps a message to the provided writer prefixed
|
||||
// with a 32-bit varint indicating the length of the encoded message, producing
|
||||
// a length-delimited record stream, which can be used to chain together
|
||||
// encoded messages of the same type together in a file. It returns the total
|
||||
// number of bytes written and any applicable error. This is roughly
|
||||
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
|
||||
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
|
||||
buffer, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
buf := make([]byte, binary.MaxVarintLen32)
|
||||
encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
|
||||
|
||||
sync, err := w.Write(buf[:encodedLength])
|
||||
if err != nil {
|
||||
return sync, err
|
||||
}
|
||||
|
||||
n, err = w.Write(buffer)
|
||||
return n + sync, err
|
||||
}
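Putting the two halves of the package together, a minimal round-trip sketch (again leaning on the testdata types, and not part of the vendored code) that appends several length-delimited records to one buffer with WriteDelimited and reads them back in order with ReadDelimited:

package main

import (
	"bytes"
	"fmt"

	"code.google.com/p/goprotobuf/proto"
	pb "code.google.com/p/goprotobuf/proto/testdata"
	"github.com/matttproud/golang_protobuf_extensions/ext"
)

func main() {
	var buf bytes.Buffer

	// Write two records back to back into the same stream.
	for _, name := range []string{"first", "second"} {
		msg := &pb.MyMessage{Count: proto.Int32(1), Name: proto.String(name)}
		if _, err := ext.WriteDelimited(&buf, msg); err != nil {
			panic(err)
		}
	}

	// Read them back; each call consumes exactly one record.
	for i := 0; i < 2; i++ {
		out := new(pb.MyMessage)
		if _, err := ext.ReadDelimited(&buf, out); err != nil {
			panic(err)
		}
		fmt.Println(out) // prints the text form of each decoded message
	}
}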
|
||||
103 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/fixtures_test.go generated vendored Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// http://code.google.com/p/goprotobuf/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ext
|
||||
|
||||
import (
|
||||
. "code.google.com/p/goprotobuf/proto"
|
||||
. "code.google.com/p/goprotobuf/proto/testdata"
|
||||
)
|
||||
|
||||
// FROM https://code.google.com/p/goprotobuf/source/browse/proto/all_test.go.
|
||||
|
||||
func initGoTestField() *GoTestField {
|
||||
f := new(GoTestField)
|
||||
f.Label = String("label")
|
||||
f.Type = String("type")
|
||||
return f
|
||||
}
|
||||
|
||||
// These are all structurally equivalent but the tag numbers differ.
|
||||
// (It's remarkable that required, optional, and repeated all have
|
||||
// 8 letters.)
|
||||
func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
|
||||
return &GoTest_RequiredGroup{
|
||||
RequiredField: String("required"),
|
||||
}
|
||||
}
|
||||
|
||||
func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
|
||||
return &GoTest_OptionalGroup{
|
||||
RequiredField: String("optional"),
|
||||
}
|
||||
}
|
||||
|
||||
func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
|
||||
return &GoTest_RepeatedGroup{
|
||||
RequiredField: String("repeated"),
|
||||
}
|
||||
}
|
||||
|
||||
func initGoTest(setdefaults bool) *GoTest {
|
||||
pb := new(GoTest)
|
||||
if setdefaults {
|
||||
pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
|
||||
pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
|
||||
pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
|
||||
pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
|
||||
pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
|
||||
pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
|
||||
pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
|
||||
pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
|
||||
pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
|
||||
pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
|
||||
pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
|
||||
pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
|
||||
pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
|
||||
}
|
||||
|
||||
pb.Kind = GoTest_TIME.Enum()
|
||||
pb.RequiredField = initGoTestField()
|
||||
pb.F_BoolRequired = Bool(true)
|
||||
pb.F_Int32Required = Int32(3)
|
||||
pb.F_Int64Required = Int64(6)
|
||||
pb.F_Fixed32Required = Uint32(32)
|
||||
pb.F_Fixed64Required = Uint64(64)
|
||||
pb.F_Uint32Required = Uint32(3232)
|
||||
pb.F_Uint64Required = Uint64(6464)
|
||||
pb.F_FloatRequired = Float32(3232)
|
||||
pb.F_DoubleRequired = Float64(6464)
|
||||
pb.F_StringRequired = String("string")
|
||||
pb.F_BytesRequired = []byte("bytes")
|
||||
pb.F_Sint32Required = Int32(-32)
|
||||
pb.F_Sint64Required = Int64(-64)
|
||||
pb.Requiredgroup = initGoTest_RequiredGroup()
|
||||
|
||||
return pb
|
||||
}
|
||||
1 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/MANIFEST generated vendored Normal file
@@ -0,0 +1 @@
|
||||
Imported at 75cd24fc2f2c from https://bitbucket.org/ww/goautoneg.
|
||||
13 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/Makefile generated vendored Normal file
@@ -0,0 +1,13 @@
|
||||
include $(GOROOT)/src/Make.inc
|
||||
|
||||
TARG=bitbucket.org/ww/goautoneg
|
||||
GOFILES=autoneg.go
|
||||
|
||||
include $(GOROOT)/src/Make.pkg
|
||||
|
||||
format:
|
||||
gofmt -w *.go
|
||||
|
||||
docs:
|
||||
gomake clean
|
||||
godoc ${TARG} > README.txt
|
||||
67 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/README.txt generated vendored Normal file
@@ -0,0 +1,67 @@
|
||||
PACKAGE
|
||||
|
||||
package goautoneg
|
||||
import "bitbucket.org/ww/goautoneg"
|
||||
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
FUNCTIONS
|
||||
|
||||
func Negotiate(header string, alternatives []string) (content_type string)
|
||||
Negotiate the most appropriate content_type given the accept header
|
||||
and a list of alternatives.
|
||||
|
||||
func ParseAccept(header string) (accept []Accept)
|
||||
Parse an Accept Header string returning a sorted list
|
||||
of clauses
|
||||
|
||||
|
||||
TYPES
|
||||
|
||||
type Accept struct {
|
||||
Type, SubType string
|
||||
Q float32
|
||||
Params map[string]string
|
||||
}
|
||||
Structure to represent a clause in an HTTP Accept Header
|
||||
|
||||
|
||||
SUBDIRECTORIES
|
||||
|
||||
.hg
|
||||
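The README above is the package's godoc output: Negotiate picks the best of the server's alternatives for a client Accept header, and ParseAccept exposes the sorted clauses directly. A minimal usage sketch, not part of the vendored diff; the import path assumes the vendored location under client_golang and the header value is invented:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/_vendor/goautoneg"
)

func main() {
	// Hypothetical client Accept header.
	header := "text/html;q=0.9,application/json,*/*;q=0.1"

	// Negotiate returns the best-matching alternative; application/json wins
	// here because its quality factor defaults to 1.0.
	fmt.Println(goautoneg.Negotiate(header, []string{"application/json", "text/html"}))

	// ParseAccept returns the clauses sorted by preference.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}
}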
162
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
/*
HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


*/
package goautoneg

import (
	"sort"
	"strconv"
	"strings"
)

// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
	Type, SubType string
	Q             float64
	Params        map[string]string
}

// For internal use, so that we can use the sort interface
type accept_slice []Accept

func (accept accept_slice) Len() int {
	slice := []Accept(accept)
	return len(slice)
}

func (accept accept_slice) Less(i, j int) bool {
	slice := []Accept(accept)
	ai, aj := slice[i], slice[j]
	if ai.Q > aj.Q {
		return true
	}
	if ai.Type != "*" && aj.Type == "*" {
		return true
	}
	if ai.SubType != "*" && aj.SubType == "*" {
		return true
	}
	return false
}

func (accept accept_slice) Swap(i, j int) {
	slice := []Accept(accept)
	slice[i], slice[j] = slice[j], slice[i]
}

// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
	parts := strings.Split(header, ",")
	accept = make([]Accept, 0, len(parts))
	for _, part := range parts {
		part := strings.Trim(part, " ")

		a := Accept{}
		a.Params = make(map[string]string)
		a.Q = 1.0

		mrp := strings.Split(part, ";")

		media_range := mrp[0]
		sp := strings.Split(media_range, "/")
		a.Type = strings.Trim(sp[0], " ")

		switch {
		case len(sp) == 1 && a.Type == "*":
			a.SubType = "*"
		case len(sp) == 2:
			a.SubType = strings.Trim(sp[1], " ")
		default:
			continue
		}

		if len(mrp) == 1 {
			accept = append(accept, a)
			continue
		}

		for _, param := range mrp[1:] {
			sp := strings.SplitN(param, "=", 2)
			if len(sp) != 2 {
				continue
			}
			token := strings.Trim(sp[0], " ")
			if token == "q" {
				a.Q, _ = strconv.ParseFloat(sp[1], 32)
			} else {
				a.Params[token] = strings.Trim(sp[1], " ")
			}
		}

		accept = append(accept, a)
	}

	slice := accept_slice(accept)
	sort.Sort(slice)

	return
}

// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
	asp := make([][]string, 0, len(alternatives))
	for _, ctype := range alternatives {
		asp = append(asp, strings.SplitN(ctype, "/", 2))
	}
	for _, clause := range ParseAccept(header) {
		for i, ctsp := range asp {
			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
				content_type = alternatives[i]
				return
			}
			if clause.Type == ctsp[0] && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
			if clause.Type == "*" && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
		}
	}
	return
}
33
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg_test.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
package goautoneg

import (
	"testing"
)

var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"

func TestParseAccept(t *testing.T) {
	alternatives := []string{"text/html", "image/png"}
	content_type := Negotiate(chrome, alternatives)
	if content_type != "image/png" {
		t.Errorf("got %s expected image/png", content_type)
	}

	alternatives = []string{"text/html", "text/plain", "text/n3"}
	content_type = Negotiate(chrome, alternatives)
	if content_type != "text/html" {
		t.Errorf("got %s expected text/html", content_type)
	}

	alternatives = []string{"text/n3", "text/plain"}
	content_type = Negotiate(chrome, alternatives)
	if content_type != "text/plain" {
		t.Errorf("got %s expected text/plain", content_type)
	}

	alternatives = []string{"text/n3", "application/rdf+xml"}
	content_type = Negotiate(chrome, alternatives)
	if content_type != "text/n3" {
		t.Errorf("got %s expected text/n3", content_type)
	}
}
63
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/bench_test.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
package quantile

import (
	"testing"
)

func BenchmarkInsertTargeted(b *testing.B) {
	b.ReportAllocs()

	s := NewTargeted(Targets)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertBiased(b *testing.B) {
	s := NewLowBiased(0.01)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
	s := NewLowBiased(0.0001)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkQuery(b *testing.B) {
	s := NewTargeted(Targets)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}

func BenchmarkQuerySmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}
112
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
// +build go1.1

package quantile_test

import (
	"bufio"
	"fmt"
	"github.com/bmizerany/perks/quantile"
	"log"
	"os"
	"strconv"
	"time"
)

func Example_simple() {
	ch := make(chan float64)
	go sendFloats(ch)

	// Compute the 50th, 90th, and 99th percentile.
	q := quantile.NewTargeted(0.50, 0.90, 0.99)
	for v := range ch {
		q.Insert(v)
	}

	fmt.Println("perc50:", q.Query(0.50))
	fmt.Println("perc90:", q.Query(0.90))
	fmt.Println("perc99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
	// Output:
	// perc50: 5
	// perc90: 14
	// perc99: 40
	// count: 2388
}

func Example_mergeMultipleStreams() {
	// Scenario:
	// We have multiple database shards. On each shard, there is a process
	// collecting query response times from the database logs and inserting
	// them into a Stream (created via NewTargeted(0.90)), much like the
	// Simple example. These processes expose a network interface for us to
	// ask them to serialize and send us the results of their
	// Stream.Samples so we may Merge and Query them.
	//
	// NOTES:
	// * These sample sets are small, allowing us to get them
	// across the network much faster than sending the entire list of data
	// points.
	//
	// * For this to work correctly, we must supply the same quantiles
	// a priori the process collecting the samples supplied to NewTargeted,
	// even if we do not plan to query them all here.
	ch := make(chan quantile.Samples)
	getDBQuerySamples(ch)
	q := quantile.NewTargeted(0.90)
	for samples := range ch {
		q.Merge(samples)
	}
	fmt.Println("perc90:", q.Query(0.90))
}

func Example_window() {
	// Scenario: We want the 90th, 95th, and 99th percentiles for each
	// minute.

	ch := make(chan float64)
	go sendStreamValues(ch)

	tick := time.NewTicker(1 * time.Minute)
	q := quantile.NewTargeted(0.90, 0.95, 0.99)
	for {
		select {
		case t := <-tick.C:
			flushToDB(t, q.Samples())
			q.Reset()
		case v := <-ch:
			q.Insert(v)
		}
	}
}

func sendStreamValues(ch chan float64) {
	// Use your imagination
}

func flushToDB(t time.Time, samples quantile.Samples) {
	// Use your imagination
}

// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}

func sendFloats(ch chan<- float64) {
	f, err := os.Open("exampledata.txt")
	if err != nil {
		log.Fatal(err)
	}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		b := sc.Bytes()
		v, err := strconv.ParseFloat(string(b), 64)
		if err != nil {
			log.Fatal(err)
		}
		ch <- v
	}
	if sc.Err() != nil {
		log.Fatal(sc.Err())
	}
	close(ch)
}
2388
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/exampledata.txt
generated
vendored
Normal file
File diff suppressed because it is too large.
292
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream.go
generated
vendored
Normal file
@@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(float64(l) * q)
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
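Note that this vendored stream.go takes its targets as a map of quantile to allowed absolute error, whereas example_test.go above still imports github.com/bmizerany/perks/quantile and calls the older variadic NewTargeted. A minimal sketch against the API defined in this file (the import path is assumed to be the vendored location, and the values are illustrative only):

package main

import (
	"fmt"
	"math/rand"

	"github.com/prometheus/client_golang/_vendor/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with absolute error bounds.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})

	for i := 0; i < 100000; i++ {
		q.Insert(rand.NormFloat64())
	}

	fmt.Println("p50:", q.Query(0.50))
	fmt.Println("p99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
}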
185
Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream_test.go
generated
vendored
Normal file
@@ -0,0 +1,185 @@
package quantile

import (
	"math"
	"math/rand"
	"sort"
	"testing"
)

var (
	Targets = map[float64]float64{
		0.01: 0.001,
		0.10: 0.01,
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	TargetsSmallEpsilon = map[float64]float64{
		0.01: 0.0001,
		0.10: 0.001,
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	}
	LowQuantiles  = []float64{0.01, 0.1, 0.5}
	HighQuantiles = []float64{0.99, 0.9, 0.5}
)

const RelativeEpsilon = 0.01

func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for quantile, epsilon := range Targets {
		n := float64(len(a))
		k := int(quantile * n)
		lower := int((quantile - epsilon) * n)
		if lower < 1 {
			lower = 1
		}
		upper := int(math.Ceil((quantile + epsilon) * n))
		if upper > len(a) {
			upper = len(a)
		}
		w, min, max := a[k-1], a[lower-1], a[upper-1]
		if g := s.Query(quantile); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
		}
	}
}

func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range LowQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - RelativeEpsilon) * qu * n)
		upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range HighQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
		upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

func populateStream(s *Stream) []float64 {
	a := make([]float64, 0, 1e5+100)
	for i := 0; i < cap(a); i++ {
		v := rand.NormFloat64()
		// Add 5% asymmetric outliers.
		if i%20 == 0 {
			v = v*v + 1
		}
		s.Insert(v)
		a = append(a, v)
	}
	return a
}

func TestTargetedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(Targets)
	a := populateStream(s)
	verifyPercsWithAbsoluteEpsilon(t, a, s)
}

func TestLowBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewLowBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyLowPercsWithRelativeEpsilon(t, a, s)
}

func TestHighBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewHighBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyHighPercsWithRelativeEpsilon(t, a, s)
}

func TestTargetedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewTargeted(Targets)
	s2 := NewTargeted(Targets)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyPercsWithAbsoluteEpsilon(t, a, s1)
}

func TestLowBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewLowBiased(RelativeEpsilon)
	s2 := NewLowBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyLowPercsWithRelativeEpsilon(t, a, s2)
}

func TestHighBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewHighBiased(RelativeEpsilon)
	s2 := NewHighBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyHighPercsWithRelativeEpsilon(t, a, s2)
}

func TestUncompressed(t *testing.T) {
	q := NewTargeted(Targets)
	for i := 100; i > 0; i-- {
		q.Insert(float64(i))
	}
	if g := q.Count(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
	// Before compression, Query should have 100% accuracy.
	for quantile := range Targets {
		w := quantile * 100
		if g := q.Query(quantile); g != w {
			t.Errorf("want %f, got %f", w, g)
		}
	}
}

func TestUncompressedSamples(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.001})
	for i := 1; i <= 100; i++ {
		q.Insert(float64(i))
	}
	if g := q.Samples().Len(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
}

func TestUncompressedOne(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.01})
	q.Insert(3.14)
	if g := q.Query(0.90); g != 3.14 {
		t.Error("want PI, got", g)
	}
}

func TestDefaults(t *testing.T) {
	if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
		t.Errorf("want 0, got %f", g)
	}
}
110
Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"fmt"
	"strconv"
)

// Fingerprint provides a hash-capable representation of a Metric.
// For our purposes, FNV-1A 64-bit is used.
type Fingerprint uint64

func (f Fingerprint) String() string {
	return fmt.Sprintf("%016x", uint64(f))
}

// Less implements sort.Interface.
func (f Fingerprint) Less(o Fingerprint) bool {
	return f < o
}

// Equal implements sort.Interface.
func (f Fingerprint) Equal(o Fingerprint) bool {
	return f == o
}

// LoadFromString transforms a string representation into a Fingerprint.
func (f *Fingerprint) LoadFromString(s string) error {
	num, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		return err
	}
	*f = Fingerprint(num)
	return nil
}

// Fingerprints represents a collection of Fingerprint subject to a given
// natural sorting scheme. It implements sort.Interface.
type Fingerprints []Fingerprint

// Len implements sort.Interface.
func (f Fingerprints) Len() int {
	return len(f)
}

// Less implements sort.Interface.
func (f Fingerprints) Less(i, j int) bool {
	return f[i] < f[j]
}

// Swap implements sort.Interface.
func (f Fingerprints) Swap(i, j int) {
	f[i], f[j] = f[j], f[i]
}

// FingerprintSet is a set of Fingerprints.
type FingerprintSet map[Fingerprint]struct{}

// Equal returns true if both sets contain the same elements (and not more).
func (s FingerprintSet) Equal(o FingerprintSet) bool {
	if len(s) != len(o) {
		return false
	}

	for k := range s {
		if _, ok := o[k]; !ok {
			return false
		}
	}

	return true
}

// Intersection returns the elements contained in both sets.
func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
	myLength, otherLength := len(s), len(o)
	if myLength == 0 || otherLength == 0 {
		return FingerprintSet{}
	}

	subSet := s
	superSet := o

	if otherLength < myLength {
		subSet = o
		superSet = s
	}

	out := FingerprintSet{}

	for k := range subSet {
		if _, ok := superSet[k]; ok {
			out[k] = struct{}{}
		}
	}

	return out
}
63
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"strings"
)

const (
	// ExporterLabelPrefix is the label name prefix to prepend if a
	// synthetic label is already present in the exported metrics.
	ExporterLabelPrefix LabelName = "exporter_"

	// MetricNameLabel is the label name indicating the metric name of a
	// timeseries.
	MetricNameLabel LabelName = "__name__"

	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
	// label names.
	ReservedLabelPrefix = "__"

	// JobLabel is the label name indicating the job from which a timeseries
	// was scraped.
	JobLabel LabelName = "job"
)

// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string

// LabelNames is a sortable LabelName slice. In implements sort.Interface.
type LabelNames []LabelName

func (l LabelNames) Len() int {
	return len(l)
}

func (l LabelNames) Less(i, j int) bool {
	return l[i] < l[j]
}

func (l LabelNames) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}

func (l LabelNames) String() string {
	labelStrings := make([]string, 0, len(l))
	for _, label := range l {
		labelStrings = append(labelStrings, string(label))
	}
	return strings.Join(labelStrings, ", ")
}
55
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"sort"
	"testing"
)

func testLabelNames(t testing.TB) {
	var scenarios = []struct {
		in  LabelNames
		out LabelNames
	}{
		{
			in:  LabelNames{"ZZZ", "zzz"},
			out: LabelNames{"ZZZ", "zzz"},
		},
		{
			in:  LabelNames{"aaa", "AAA"},
			out: LabelNames{"AAA", "aaa"},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if expected != scenario.in[j] {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}

func TestLabelNames(t *testing.T) {
	testLabelNames(t)
}

func BenchmarkLabelNames(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testLabelNames(b)
	}
}
64
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"fmt"
	"sort"
	"strings"
)

// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
// may be fully-qualified down to the point where it may resolve to a single
// Metric in the data store or not. All operations that occur within the realm
// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
// match.
type LabelSet map[LabelName]LabelValue

// Merge is a helper function to non-destructively merge two label sets.
func (l LabelSet) Merge(other LabelSet) LabelSet {
	result := make(LabelSet, len(l))

	for k, v := range l {
		result[k] = v
	}

	for k, v := range other {
		result[k] = v
	}

	return result
}

func (l LabelSet) String() string {
	labelStrings := make([]string, 0, len(l))
	for label, value := range l {
		labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
	}

	switch len(labelStrings) {
	case 0:
		return ""
	default:
		sort.Strings(labelStrings)
		return fmt.Sprintf("{%s}", strings.Join(labelStrings, ", "))
	}
}

// MergeFromMetric merges Metric into this LabelSet.
func (l LabelSet) MergeFromMetric(m Metric) {
	for k, v := range m {
		l[k] = v
	}
}
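LabelSet.Merge is non-destructive and lets values from its argument win on collision, while MergeFromMetric mutates the receiver in place. A small sketch of the difference (label values here are invented; the import path is the library's usual one):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)

func main() {
	base := model.LabelSet{"job": "api-server", "zone": "us-east"}
	extra := model.LabelSet{"zone": "eu-west", "instance": "host:9100"}

	// Merge copies both sets into a new LabelSet; base stays untouched and
	// extra's value for "zone" wins.
	merged := base.Merge(extra)
	fmt.Println(base)
	fmt.Println(merged)

	// MergeFromMetric, by contrast, writes the metric's labels into base itself.
	base.MergeFromMetric(model.Metric{model.MetricNameLabel: "up"})
	fmt.Println(base)
}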
36
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"sort"
)

// A LabelValue is an associated value for a LabelName.
type LabelValue string

// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
type LabelValues []LabelValue

func (l LabelValues) Len() int {
	return len(l)
}

func (l LabelValues) Less(i, j int) bool {
	return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
}

func (l LabelValues) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}
55
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"sort"
	"testing"
)

func testLabelValues(t testing.TB) {
	var scenarios = []struct {
		in  LabelValues
		out LabelValues
	}{
		{
			in:  LabelValues{"ZZZ", "zzz"},
			out: LabelValues{"ZZZ", "zzz"},
		},
		{
			in:  LabelValues{"aaa", "AAA"},
			out: LabelValues{"AAA", "aaa"},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if expected != scenario.in[j] {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}

func TestLabelValues(t *testing.T) {
	testLabelValues(t)
}

func BenchmarkLabelValues(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testLabelValues(b)
	}
}
151
Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"hash/fnv"
	"sort"
	"strings"
)

// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric map[LabelName]LabelValue

// Equal compares the fingerprints of both metrics.
func (m Metric) Equal(o Metric) bool {
	return m.Fingerprint().Equal(o.Fingerprint())
}

// Before compares the fingerprints of both metrics.
func (m Metric) Before(o Metric) bool {
	return m.Fingerprint().Less(o.Fingerprint())
}

// String implements Stringer.
func (m Metric) String() string {
	metricName, hasName := m[MetricNameLabel]
	numLabels := len(m) - 1
	if !hasName {
		numLabels = len(m)
	}
	labelStrings := make([]string, 0, numLabels)
	for label, value := range m {
		if label != MetricNameLabel {
			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
		}
	}

	switch numLabels {
	case 0:
		if hasName {
			return string(metricName)
		}
		return "{}"
	default:
		sort.Strings(labelStrings)
		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
	}
}

// Fingerprint returns a Metric's Fingerprint.
func (m Metric) Fingerprint() Fingerprint {
	labelLength := len(m)
	labelNames := make([]string, 0, labelLength)

	for labelName := range m {
		labelNames = append(labelNames, string(labelName))
	}

	sort.Strings(labelNames)

	summer := fnv.New64a()

	for _, labelName := range labelNames {
		labelValue := m[LabelName(labelName)]

		summer.Write([]byte(labelName))
		summer.Write([]byte{0})
		summer.Write([]byte(labelValue))
	}

	return Fingerprint(binary.LittleEndian.Uint64(summer.Sum(nil)))
}

// Clone returns a copy of the Metric.
func (m Metric) Clone() Metric {
	clone := Metric{}
	for k, v := range m {
		clone[k] = v
	}
	return clone
}

// MergeFromLabelSet merges a label set into this Metric, prefixing a collision
// prefix to the label names merged from the label set where required.
func (m Metric) MergeFromLabelSet(labels LabelSet, collisionPrefix LabelName) {
	for k, v := range labels {
		if collisionPrefix != "" {
			for {
				if _, exists := m[k]; !exists {
					break
				}
				k = collisionPrefix + k
			}
		}

		m[k] = v
	}
}

// COWMetric wraps a Metric to enable copy-on-write access patterns.
type COWMetric struct {
	Copied bool
	Metric Metric
}

// Set sets a label name in the wrapped Metric to a given value and copies the
// Metric initially, if it is not already a copy.
func (m COWMetric) Set(ln LabelName, lv LabelValue) {
	m.doCOW()
	m.Metric[ln] = lv
}

// Delete deletes a given label name from the wrapped Metric and copies the
// Metric initially, if it is not already a copy.
func (m *COWMetric) Delete(ln LabelName) {
	m.doCOW()
	delete(m.Metric, ln)
}

// doCOW copies the underlying Metric if it is not already a copy.
func (m *COWMetric) doCOW() {
	if !m.Copied {
		m.Metric = m.Metric.Clone()
		m.Copied = true
	}
}

// String implements fmt.Stringer.
func (m COWMetric) String() string {
	return m.Metric.String()
}

// MarshalJSON implements json.Marshaler.
func (m COWMetric) MarshalJSON() ([]byte, error) {
	return json.Marshal(m.Metric)
}
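Metric.Fingerprint hashes the sorted label name/value pairs with FNV-1a, so two metrics with the same labels hash identically regardless of insertion order, and COWMetric defers copying until the first mutating call. A sketch of both behaviours (label names and values are invented for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
	}

	// Label order in the literal does not affect the fingerprint.
	fmt.Println(m, m.Fingerprint())

	// COWMetric clones the wrapped Metric on the first mutating call only.
	cow := model.COWMetric{Metric: m}
	cow.Delete("method")
	fmt.Println(m)          // original still carries method="GET"
	fmt.Println(cow.Metric) // the clone no longer does
}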
58
Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import "testing"

func testMetric(t testing.TB) {
	var scenarios = []struct {
		input       Metric
		fingerprint Fingerprint
	}{
		{
			input:       Metric{},
			fingerprint: 2676020557754725067,
		},
		{
			input: Metric{
				"first_name":   "electro",
				"occupation":   "robot",
				"manufacturer": "westinghouse",
			},
			fingerprint: 13260944541294022935,
		},
		{
			input: Metric{
				"x": "y",
			},
			fingerprint: 1470933794305433534,
		},
	}

	for i, scenario := range scenarios {
		if scenario.fingerprint != scenario.input.Fingerprint() {
			t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, scenario.input.Fingerprint())
		}
	}
}

func TestMetric(t *testing.T) {
	testMetric(t)
}

func BenchmarkMetric(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testMetric(b)
	}
}
15
Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package model contains core representation of Prometheus client primitives.
package model
79
Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

// Sample is a sample value with a timestamp and a metric.
type Sample struct {
	Metric    Metric
	Value     SampleValue
	Timestamp Timestamp
}

// Equal compares first the metrics, then the timestamp, then the value.
func (s *Sample) Equal(o *Sample) bool {
	if s == o {
		return true
	}

	if !s.Metric.Equal(o.Metric) {
		return false
	}
	if !s.Timestamp.Equal(o.Timestamp) {
		return false
	}
	if !s.Value.Equal(o.Value) {
		return false
	}

	return true
}

// Samples is a sortable Sample slice. It implements sort.Interface.
type Samples []*Sample

func (s Samples) Len() int {
	return len(s)
}

// Less compares first the metrics, then the timestamp.
func (s Samples) Less(i, j int) bool {
	switch {
	case s[i].Metric.Before(s[j].Metric):
		return true
	case s[j].Metric.Before(s[i].Metric):
		return false
	case s[i].Timestamp.Before(s[j].Timestamp):
		return true
	default:
		return false
	}
}

func (s Samples) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Equal compares two sets of samples and returns true if they are equal.
func (s Samples) Equal(o Samples) bool {
	if len(s) != len(o) {
		return false
	}

	for i, sample := range s {
		if !sample.Equal(o[i]) {
			return false
		}
	}
	return true
}
126
Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"sort"
	"testing"
)

func TestSamplesSort(t *testing.T) {
	input := Samples{
		&Sample{
			// Fingerprint: 81f9c9ed24563f8f.
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 1,
		},
		&Sample{
			// Fingerprint: 81f9c9ed24563f8f.
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 2,
		},
		&Sample{
			// Fingerprint: 1bf6c9ed24543f8f.
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 1,
		},
		&Sample{
			// Fingerprint: 1bf6c9ed24543f8f.
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 2,
		},
		&Sample{
			// Fingerprint: 68f4c9ed24533f8f.
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 1,
		},
		&Sample{
			// Fingerprint: 68f4c9ed24533f8f.
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 2,
		},
	}

	expected := Samples{
		&Sample{
			// Fingerprint: 1bf6c9ed24543f8f.
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 1,
		},
		&Sample{
			// Fingerprint: 1bf6c9ed24543f8f.
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 2,
		},
		&Sample{
			// Fingerprint: 68f4c9ed24533f8f.
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 1,
		},
		&Sample{
			// Fingerprint: 68f4c9ed24533f8f.
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 2,
		},
		&Sample{
			// Fingerprint: 81f9c9ed24563f8f.
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 1,
		},
		&Sample{
			// Fingerprint: 81f9c9ed24563f8f.
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 2,
		},
	}

	sort.Sort(input)

	for i, actual := range input {
		actualFp := actual.Metric.Fingerprint()
		expectedFp := expected[i].Metric.Fingerprint()

		if !actualFp.Equal(expectedFp) {
			t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String())
		}

		if actual.Timestamp != expected[i].Timestamp {
			t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp)
		}
	}
}
37
Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"fmt"
	"strconv"
)

// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64

// Equal does a straight v==o.
func (v SampleValue) Equal(o SampleValue) bool {
	return v == o
}

// MarshalJSON implements json.Marshaler.
func (v SampleValue) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%s"`, v)), nil
}

func (v SampleValue) String() string {
	return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
97
Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"bytes"
	"hash"
	"hash/fnv"
)

// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
// used to separate label names, label values, and other strings from each other
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255

var (
	// cache the signature of an empty label set.
	emptyLabelSignature = fnv.New64a().Sum64()

	hashAndBufPool = make(chan *hashAndBuf, 1024)
)

type hashAndBuf struct {
	h hash.Hash64
	b bytes.Buffer
}

func getHashAndBuf() *hashAndBuf {
	select {
	case hb := <-hashAndBufPool:
		return hb
	default:
		return &hashAndBuf{h: fnv.New64a()}
	}
}

func putHashAndBuf(hb *hashAndBuf) {
	select {
	case hashAndBufPool <- hb:
	default:
	}
}

// LabelsToSignature returns a unique signature (i.e., fingerprint) for a given
// label set.
func LabelsToSignature(labels map[string]string) uint64 {
	if len(labels) == 0 {
		return emptyLabelSignature
	}

	var result uint64
	hb := getHashAndBuf()
	defer putHashAndBuf(hb)

	for k, v := range labels {
		hb.b.WriteString(k)
		hb.b.WriteByte(SeparatorByte)
		hb.b.WriteString(v)
		hb.h.Write(hb.b.Bytes())
		result ^= hb.h.Sum64()
		hb.h.Reset()
		hb.b.Reset()
	}
	return result
}

// LabelValuesToSignature returns a unique signature (i.e., fingerprint) for the
// values of a given label set.
func LabelValuesToSignature(labels map[string]string) uint64 {
	if len(labels) == 0 {
		return emptyLabelSignature
	}

	var result uint64
	hb := getHashAndBuf()
	defer putHashAndBuf(hb)

	for _, v := range labels {
		hb.b.WriteString(v)
		hb.h.Write(hb.b.Bytes())
		result ^= hb.h.Sum64()
		hb.h.Reset()
		hb.b.Reset()
	}
	return result
}
120
Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testLabelsToSignature(t testing.TB) {
|
||||
var scenarios = []struct {
|
||||
in map[string]string
|
||||
out uint64
|
||||
}{
|
||||
{
|
||||
in: map[string]string{},
|
||||
out: 14695981039346656037,
|
||||
},
|
||||
{
|
||||
in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
|
||||
out: 12952432476264840823,
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
actual := LabelsToSignature(scenario.in)
|
||||
|
||||
if actual != scenario.out {
|
||||
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLabelToSignature(t *testing.T) {
|
||||
testLabelsToSignature(t)
|
||||
}
|
||||
|
||||
func TestEmptyLabelSignature(t *testing.T) {
|
||||
input := []map[string]string{nil, {}}
|
||||
|
||||
var ms runtime.MemStats
|
||||
runtime.ReadMemStats(&ms)
|
||||
|
||||
alloc := ms.Alloc
|
||||
|
||||
for _, labels := range input {
|
||||
LabelsToSignature(labels)
|
||||
}
|
||||
|
||||
runtime.ReadMemStats(&ms)
|
||||
|
||||
if got := ms.Alloc; alloc != got {
|
||||
t.Fatal("expected LabelsToSignature with empty labels not to perform allocations")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignature(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLabelsToSignature(b)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkLabelValuesToSignature(b *testing.B, l map[string]string, e uint64) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if a := LabelValuesToSignature(l); a != e {
|
||||
b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLabelValuesToSignatureScalar(b *testing.B) {
|
||||
benchmarkLabelValuesToSignature(b, nil, 14695981039346656037)
|
||||
}
|
||||
|
||||
func BenchmarkLabelValuesToSignatureSingle(b *testing.B) {
|
||||
benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value"}, 2653746141194979650)
|
||||
}
|
||||
|
||||
func BenchmarkLabelValuesToSignatureDouble(b *testing.B) {
|
||||
benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 8893559499616767364)
|
||||
}
|
||||
|
||||
func BenchmarkLabelValuesToSignatureTriple(b *testing.B) {
|
||||
benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 1685970066862087833)
|
||||
}
|
||||
|
||||
func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if a := LabelsToSignature(l); a != e {
|
||||
b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureScalar(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, nil, 14695981039346656037)
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureSingle(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5147259542624943964)
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureDouble(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureTriple(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
|
||||
}
|
||||
112
Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
native_time "time"
|
||||
)
|
||||
|
||||
// TODO(julius): Should this use milliseconds/nanoseconds instead? This is
|
||||
// mostly hidden from the user of these types when using the
|
||||
// methods below, so it will be easy to change this later
|
||||
// without requiring significant user code changes.
|
||||
|
||||
// Timestamp is the number of milliseconds (MinimumTick units) since the epoch
// (1970-01-01 00:00 UTC) without leap seconds.
|
||||
type Timestamp int64
|
||||
|
||||
const (
|
||||
// MinimumTick is the minimum supported time resolution. This has to be
|
||||
// at least native_time.Second in order for the code below to work.
|
||||
MinimumTick = native_time.Millisecond
|
||||
// second is the timestamp duration equivalent to one second.
|
||||
second = int64(native_time.Second / MinimumTick)
|
||||
// The number of nanoseconds per minimum tick.
|
||||
nanosPerTick = int64(MinimumTick / native_time.Nanosecond)
|
||||
)
|
||||
|
||||
// Equal reports whether two timestamps represent the same instant.
|
||||
func (t Timestamp) Equal(o Timestamp) bool {
|
||||
return t == o
|
||||
}
|
||||
|
||||
// Before reports whether the timestamp t is before o.
|
||||
func (t Timestamp) Before(o Timestamp) bool {
|
||||
return t < o
|
||||
}
|
||||
|
||||
// After reports whether the timestamp t is after o.
|
||||
func (t Timestamp) After(o Timestamp) bool {
|
||||
return t > o
|
||||
}
|
||||
|
||||
// Add returns the Timestamp t + d.
|
||||
func (t Timestamp) Add(d native_time.Duration) Timestamp {
|
||||
return t + Timestamp(d/MinimumTick)
|
||||
}
|
||||
|
||||
// Sub returns the Duration t - o.
|
||||
func (t Timestamp) Sub(o Timestamp) native_time.Duration {
|
||||
return native_time.Duration(t-o) * MinimumTick
|
||||
}
|
||||
|
||||
// Time returns the time.Time representation of t.
|
||||
func (t Timestamp) Time() native_time.Time {
|
||||
return native_time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
|
||||
}
|
||||
|
||||
// Unix returns t as a Unix time, the number of seconds elapsed
|
||||
// since January 1, 1970 UTC.
|
||||
func (t Timestamp) Unix() int64 {
|
||||
return int64(t) / second
|
||||
}
|
||||
|
||||
// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
|
||||
// since January 1, 1970 UTC.
|
||||
func (t Timestamp) UnixNano() int64 {
|
||||
return int64(t) * nanosPerTick
|
||||
}
|
||||
|
||||
// String returns a string representation of the timestamp.
|
||||
func (t Timestamp) String() string {
|
||||
return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
|
||||
}
|
||||
|
||||
func (t Timestamp) MarshalJSON() ([]byte, error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
||||
|
||||
// Now returns the current time as a Timestamp.
|
||||
func Now() Timestamp {
|
||||
return TimestampFromTime(native_time.Now())
|
||||
}
|
||||
|
||||
// TimestampFromTime returns the Timestamp equivalent to the time.Time t.
|
||||
func TimestampFromTime(t native_time.Time) Timestamp {
|
||||
return TimestampFromUnixNano(t.UnixNano())
|
||||
}
|
||||
|
||||
// TimestampFromUnix returns the Timestamp equivalent to the Unix timestamp t
|
||||
// provided in seconds.
|
||||
func TimestampFromUnix(t int64) Timestamp {
|
||||
return Timestamp(t * second)
|
||||
}
|
||||
|
||||
// TimestampFromUnixNano returns the Timestamp equivalent to the Unix timestamp
|
||||
// t provided in nanoseconds.
|
||||
func TimestampFromUnixNano(t int64) Timestamp {
|
||||
return Timestamp(t / nanosPerTick)
|
||||
}
|
||||
86
Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"testing"
|
||||
native_time "time"
|
||||
)
|
||||
|
||||
func TestComparators(t *testing.T) {
|
||||
t1a := TimestampFromUnix(0)
|
||||
t1b := TimestampFromUnix(0)
|
||||
t2 := TimestampFromUnix(2*second - 1)
|
||||
|
||||
if !t1a.Equal(t1b) {
|
||||
t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
|
||||
}
|
||||
if t1a.Equal(t2) {
|
||||
t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
|
||||
}
|
||||
|
||||
if !t1a.Before(t2) {
|
||||
t.Fatalf("Expected %s to be before %s", t1a, t2)
|
||||
}
|
||||
if t1a.Before(t1b) {
|
||||
t.Fatalf("Expected %s to not be before %s", t1a, t1b)
|
||||
}
|
||||
|
||||
if !t2.After(t1a) {
|
||||
t.Fatalf("Expected %s to be after %s", t2, t1a)
|
||||
}
|
||||
if t1b.After(t1a) {
|
||||
t.Fatalf("Expected %s to not be after %s", t1b, t1a)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampConversions(t *testing.T) {
|
||||
unixSecs := int64(1136239445)
|
||||
unixNsecs := int64(123456789)
|
||||
unixNano := unixSecs*1000000000 + unixNsecs
|
||||
|
||||
t1 := native_time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
|
||||
t2 := native_time.Unix(unixSecs, unixNsecs)
|
||||
|
||||
ts := TimestampFromUnixNano(unixNano)
|
||||
if !ts.Time().Equal(t1) {
|
||||
t.Fatalf("Expected %s, got %s", t1, ts.Time())
|
||||
}
|
||||
|
||||
// Test available precision.
|
||||
ts = TimestampFromTime(t2)
|
||||
if !ts.Time().Equal(t1) {
|
||||
t.Fatalf("Expected %s, got %s", t1, ts.Time())
|
||||
}
|
||||
|
||||
if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
|
||||
t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDuration(t *testing.T) {
|
||||
duration := native_time.Second + native_time.Minute + native_time.Hour
|
||||
goTime := native_time.Unix(1136239445, 0)
|
||||
|
||||
ts := TimestampFromTime(goTime)
|
||||
if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
|
||||
t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
|
||||
}
|
||||
|
||||
earlier := ts.Add(-duration)
|
||||
delta := ts.Sub(earlier)
|
||||
if delta != duration {
|
||||
t.Fatalf("Expected %s to be equal to %s", delta, duration)
|
||||
}
|
||||
}
|
||||
1
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
command-line-arguments.test
53
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
Normal file
@@ -0,0 +1,53 @@
# Overview
This is the [Go](http://golang.org) client library for
[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
enables authors to define process-space metrics for their servers and
expose them through a web service interface for extraction,
aggregation, and a whole slew of other post-processing techniques.

# Installing
    $ go get github.com/prometheus/client_golang/prometheus

# Example
```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	indexed = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "my_company",
		Subsystem: "indexer",
		Name:      "documents_indexed",
		Help:      "The number of documents indexed.",
	})
	size = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "my_company",
		Subsystem: "storage",
		Name:      "documents_total_size_bytes",
		Help:      "The total size of all documents in the storage.",
	})
)

func main() {
	http.Handle("/metrics", prometheus.Handler())

	indexed.Inc()
	size.Set(5)

	http.ListenAndServe(":8080", nil)
}

func init() {
	prometheus.MustRegister(indexed)
	prometheus.MustRegister(size)
}
```
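
A sketch of what a scrape of `http://localhost:8080/metrics` might then return, assuming the default text exposition format and abridged to the two metrics above (the handler also exports telemetry about the /metrics endpoint itself):

```
# HELP my_company_indexer_documents_indexed The number of documents indexed.
# TYPE my_company_indexer_documents_indexed counter
my_company_indexer_documents_indexed 1
# HELP my_company_storage_documents_total_size_bytes The total size of all documents in the storage.
# TYPE my_company_storage_documents_total_size_bytes gauge
my_company_storage_documents_total_size_bytes 5
```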

# Documentation

[GoDoc](https://godoc.org/github.com/prometheus/client_golang)
131
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkCounterWithLabelValues(b *testing.B) {
|
||||
m := NewCounterVec(
|
||||
CounterOpts{
|
||||
Name: "benchmark_counter",
|
||||
Help: "A counter to benchmark it.",
|
||||
},
|
||||
[]string{"one", "two", "three"},
|
||||
)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.WithLabelValues("eins", "zwei", "drei").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCounterWithMappedLabels(b *testing.B) {
|
||||
m := NewCounterVec(
|
||||
CounterOpts{
|
||||
Name: "benchmark_counter",
|
||||
Help: "A counter to benchmark it.",
|
||||
},
|
||||
[]string{"one", "two", "three"},
|
||||
)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {
|
||||
m := NewCounterVec(
|
||||
CounterOpts{
|
||||
Name: "benchmark_counter",
|
||||
Help: "A counter to benchmark it.",
|
||||
},
|
||||
[]string{"one", "two", "three"},
|
||||
)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
labels := Labels{"two": "zwei", "one": "eins", "three": "drei"}
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.With(labels).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCounterNoLabels(b *testing.B) {
|
||||
m := NewCounter(CounterOpts{
|
||||
Name: "benchmark_counter",
|
||||
Help: "A counter to benchmark it.",
|
||||
})
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGaugeWithLabelValues(b *testing.B) {
|
||||
m := NewGaugeVec(
|
||||
GaugeOpts{
|
||||
Name: "benchmark_gauge",
|
||||
Help: "A gauge to benchmark it.",
|
||||
},
|
||||
[]string{"one", "two", "three"},
|
||||
)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.WithLabelValues("eins", "zwei", "drei").Set(3.1415)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGaugeNoLabels(b *testing.B) {
|
||||
m := NewGauge(GaugeOpts{
|
||||
Name: "benchmark_gauge",
|
||||
Help: "A gauge to benchmark it.",
|
||||
})
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Set(3.1415)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSummaryWithLabelValues(b *testing.B) {
|
||||
m := NewSummaryVec(
|
||||
SummaryOpts{
|
||||
Name: "benchmark_summary",
|
||||
Help: "A summary to benchmark it.",
|
||||
},
|
||||
[]string{"one", "two", "three"},
|
||||
)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSummaryNoLabels(b *testing.B) {
|
||||
m := NewSummary(SummaryOpts{
|
||||
Name: "benchmark_summary",
|
||||
Help: "A summary to benchmark it.",
|
||||
},
|
||||
)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Observe(3.1415)
|
||||
}
|
||||
}
|
||||
75
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
//
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
// also Collectors (which only ever collect one metric, namely itself). An
// implementer of Collector may, however, collect multiple metrics in a
// coordinated fashion and/or create metrics on the fly. Examples for collectors
// already implemented in this library are the metric vectors (i.e. collection
// of multiple instances of the same Metric but with different label values)
// like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
	// Describe sends the super-set of all possible descriptors of metrics
	// collected by this Collector to the provided channel and returns once
	// the last descriptor has been sent. The sent descriptors fulfill the
	// consistency and uniqueness requirements described in the Desc
	// documentation. (It is valid if one and the same Collector sends
	// duplicate descriptors. Those duplicates are simply ignored. However,
	// two different Collectors must not send duplicate descriptors.) This
	// method idempotently sends the same descriptors throughout the
	// lifetime of the Collector. If a Collector encounters an error while
	// executing this method, it must send an invalid descriptor (created
	// with NewInvalidDesc) to signal the error to the registry.
	Describe(chan<- *Desc)
	// Collect is called by Prometheus when collecting metrics. The
	// implementation sends each collected metric via the provided channel
	// and returns once the last metric has been sent. The descriptor of
	// each sent metric is one of those returned by Describe. Returned
	// metrics that share the same descriptor must differ in their variable
	// label values. This method may be called concurrently and must
	// therefore be implemented in a concurrency safe way. Blocking occurs
	// at the expense of total performance of rendering all registered
	// metrics. Ideally, Collector implementations support concurrent
	// readers.
	Collect(chan<- Metric)
}

// SelfCollector implements Collector for a single Metric so that the
// Metric collects itself. Add it as an anonymous field to a struct that
// implements Metric, and call Init with the Metric itself as an argument.
type SelfCollector struct {
	self Metric
}

// Init provides the SelfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *SelfCollector) Init(self Metric) {
	c.self = self
}

// Describe implements Collector.
func (c *SelfCollector) Describe(ch chan<- *Desc) {
	ch <- c.self.Desc()
}

// Collect implements Collector.
func (c *SelfCollector) Collect(ch chan<- Metric) {
	ch <- c.self
}
175
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
Normal file
@@ -0,0 +1,175 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"hash/fnv"
|
||||
)
|
||||
|
||||
// Counter is a Metric that represents a single numerical value that only ever
|
||||
// goes up. That implies that it cannot be used to count items whose number can
|
||||
// also go down, e.g. the number of currently running goroutines. Those
|
||||
// "counters" are represented by Gauges.
|
||||
//
|
||||
// A Counter is typically used to count requests served, tasks completed, errors
|
||||
// occurred, etc.
|
||||
//
|
||||
// To create Counter instances, use NewCounter.
|
||||
type Counter interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Set is used to set the Counter to an arbitrary value. It is only used
|
||||
// if you have to transfer a value from an external counter into this
// Prometheus metric. Do not use it for regular handling of a
|
||||
// Prometheus counter (as it can be used to break the contract of
|
||||
// monotonically increasing values).
|
||||
Set(float64)
|
||||
// Inc increments the counter by 1.
|
||||
Inc()
|
||||
// Add adds the given value to the counter. It panics if the value is <
|
||||
// 0.
|
||||
Add(float64)
|
||||
}
|
||||
|
||||
// CounterOpts is an alias for Opts. See there for doc comments.
|
||||
type CounterOpts Opts
|
||||
|
||||
// NewCounter creates a new Counter based on the provided CounterOpts.
|
||||
func NewCounter(opts CounterOpts) Counter {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
||||
result.Init(result) // Init self-collection.
|
||||
return result
|
||||
}
|
||||
|
||||
type counter struct {
|
||||
value
|
||||
}
|
||||
|
||||
func (c *counter) Add(v float64) {
|
||||
if v < 0 {
|
||||
panic(errors.New("counter cannot decrease in value"))
|
||||
}
|
||||
c.value.Add(v)
|
||||
}
|
||||
|
||||
// CounterVec is a Collector that bundles a set of Counters that all share the
|
||||
// same Desc, but have different values for their variable labels. This is used
|
||||
// if you want to count the same thing partitioned by various dimensions
|
||||
// (e.g. number of http requests, partitioned by response code and
|
||||
// method). Create instances with NewCounterVec.
|
||||
//
|
||||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
||||
// detailed documentation.
|
||||
type CounterVec struct {
|
||||
MetricVec
|
||||
}
|
||||
|
||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &CounterVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
result.Init(result) // Init self-collection.
|
||||
return result
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Counter and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Counter), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Counter and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Counter), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Counter)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (m *CounterVec) With(labels Labels) Counter {
|
||||
return m.MetricVec.With(labels).(Counter)
|
||||
}
|
||||
|
||||
// CounterFunc is a Counter whose value is determined at collect time by calling a
|
||||
// provided function.
|
||||
//
|
||||
// To create CounterFunc instances, use NewCounterFunc.
|
||||
type CounterFunc interface {
|
||||
Metric
|
||||
Collector
|
||||
}
|
||||
|
||||
// NewCounterFunc creates a new CounterFunc based on the provided
|
||||
// CounterOpts. The value reported is determined by calling the given function
|
||||
// from within the Write method. Take into account that metric collection may
|
||||
// happen concurrently. If that results in concurrent calls to Write, like in
|
||||
// the case where a CounterFunc is directly registered with Prometheus, the
|
||||
// provided function must be concurrency-safe. The function should also honor
|
||||
// the contract for a Counter (values only go up, not down), but compliance will
|
||||
// not be checked.
|
||||
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), CounterValue, function)
|
||||
}
|
||||
58
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func TestCounterAdd(t *testing.T) {
|
||||
counter := NewCounter(CounterOpts{
|
||||
Name: "test",
|
||||
Help: "test help",
|
||||
ConstLabels: Labels{"a": "1", "b": "2"},
|
||||
}).(*counter)
|
||||
counter.Inc()
|
||||
if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
|
||||
t.Errorf("Expected %f, got %f.", expected, got)
|
||||
}
|
||||
counter.Add(42)
|
||||
if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
|
||||
t.Errorf("Expected %f, got %f.", expected, got)
|
||||
}
|
||||
|
||||
if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
|
||||
t.Errorf("Expected error %q, got %q.", expected, got)
|
||||
}
|
||||
|
||||
m := &dto.Metric{}
|
||||
counter.Write(m)
|
||||
|
||||
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
|
||||
t.Errorf("expected %q, got %q", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func decreaseCounter(c *counter) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
c.Add(-1)
|
||||
return nil
|
||||
}
|
||||
199
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
|
||||
labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
|
||||
)
|
||||
|
||||
// Labels represents a collection of label name -> value mappings. This type is
|
||||
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
||||
// metric vector Collectors, e.g.:
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
//
|
||||
// The other use-case is the specification of constant label pairs in Opts or to
|
||||
// create a Desc.
|
||||
type Labels map[string]string
|
||||
|
||||
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
||||
// the immutable meta-data of a Metric. The normal Metric implementations
|
||||
// included in this package manage their Desc under the hood. Users only have to
|
||||
// deal with Desc if they use advanced features like the ExpvarCollector or
|
||||
// custom Collectors and Metrics.
|
||||
//
|
||||
// Descriptors registered with the same registry have to fulfill certain
|
||||
// consistency and uniqueness criteria if they share the same fully-qualified
|
||||
// name: They must have the same help string and the same label names (aka label
|
||||
// dimensions) in each, constLabels and variableLabels, but they must differ in
|
||||
// the values of the constLabels.
|
||||
//
|
||||
// Descriptors that share the same fully-qualified names and the same label
|
||||
// values of their constLabels are considered equal.
|
||||
//
|
||||
// Use NewDesc to create new Desc instances.
|
||||
type Desc struct {
|
||||
// fqName has been built from Namespace, Subsystem, and Name.
|
||||
fqName string
|
||||
// help provides some helpful information about this metric.
|
||||
help string
|
||||
// constLabelPairs contains precalculated DTO label pairs based on
|
||||
// the constant labels.
|
||||
constLabelPairs []*dto.LabelPair
|
||||
// VariableLabels contains names of labels for which the metric
|
||||
// maintains variable values.
|
||||
variableLabels []string
|
||||
// id is a hash of the values of the ConstLabels and fqName. This
|
||||
// must be unique among all registered descriptors and can therefore be
|
||||
// used as an identifier of the descriptor.
|
||||
id uint64
|
||||
// dimHash is a hash of the label names (preset and variable) and the
|
||||
// Help string. Each Desc with the same fqName must have the same
|
||||
// dimHash.
|
||||
dimHash uint64
|
||||
// err is an error that occurred during construction. It is reported on
|
||||
// registration time.
|
||||
err error
|
||||
}
|
||||
|
||||
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
||||
// and will be reported on registration time. variableLabels and constLabels can
|
||||
// be nil if no such labels should be set. fqName and help must not be empty.
|
||||
//
|
||||
// variableLabels only contain the label names. Their label values are variable
|
||||
// and therefore not part of the Desc. (They are managed within the Metric.)
|
||||
//
|
||||
// For constLabels, the label values are constant. Therefore, they are fully
|
||||
// specified in the Desc. See the Opts documentation for the implications of
|
||||
// constant labels.
|
||||
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
|
||||
d := &Desc{
|
||||
fqName: fqName,
|
||||
help: help,
|
||||
variableLabels: variableLabels,
|
||||
}
|
||||
if help == "" {
|
||||
d.err = errors.New("empty help string")
|
||||
return d
|
||||
}
|
||||
if !metricNameRE.MatchString(fqName) {
|
||||
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
||||
return d
|
||||
}
|
||||
// labelValues contains the label values of const labels (in order of
|
||||
// their sorted label names) plus the fqName (at position 0).
|
||||
labelValues := make([]string, 1, len(constLabels)+1)
|
||||
labelValues[0] = fqName
|
||||
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
|
||||
labelNameSet := map[string]struct{}{}
|
||||
// First add only the const label names and sort them...
|
||||
for labelName := range constLabels {
|
||||
if !checkLabelName(labelName) {
|
||||
d.err = fmt.Errorf("%q is not a valid label name", labelName)
|
||||
return d
|
||||
}
|
||||
labelNames = append(labelNames, labelName)
|
||||
labelNameSet[labelName] = struct{}{}
|
||||
}
|
||||
sort.Strings(labelNames)
|
||||
// ... so that we can now add const label values in the order of their names.
|
||||
for _, labelName := range labelNames {
|
||||
labelValues = append(labelValues, constLabels[labelName])
|
||||
}
|
||||
// Now add the variable label names, but prefix them with something that
|
||||
// cannot be in a regular label name. That prevents matching the label
|
||||
// dimension with a different mix between preset and variable labels.
|
||||
for _, labelName := range variableLabels {
|
||||
if !checkLabelName(labelName) {
|
||||
d.err = fmt.Errorf("%q is not a valid label name", labelName)
|
||||
return d
|
||||
}
|
||||
labelNames = append(labelNames, "$"+labelName)
|
||||
labelNameSet[labelName] = struct{}{}
|
||||
}
|
||||
if len(labelNames) != len(labelNameSet) {
|
||||
d.err = errors.New("duplicate label names")
|
||||
return d
|
||||
}
|
||||
h := fnv.New64a()
|
||||
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
|
||||
for _, val := range labelValues {
|
||||
b.Reset()
|
||||
b.WriteString(val)
|
||||
b.WriteByte(model.SeparatorByte)
|
||||
h.Write(b.Bytes())
|
||||
}
|
||||
d.id = h.Sum64()
|
||||
// Sort labelNames so that order doesn't matter for the hash.
|
||||
sort.Strings(labelNames)
|
||||
// Now hash together (in this order) the help string and the sorted
|
||||
// label names.
|
||||
h.Reset()
|
||||
b.Reset()
|
||||
b.WriteString(help)
|
||||
b.WriteByte(model.SeparatorByte)
|
||||
h.Write(b.Bytes())
|
||||
for _, labelName := range labelNames {
|
||||
b.Reset()
|
||||
b.WriteString(labelName)
|
||||
b.WriteByte(model.SeparatorByte)
|
||||
h.Write(b.Bytes())
|
||||
}
|
||||
d.dimHash = h.Sum64()
|
||||
|
||||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
||||
for n, v := range constLabels {
|
||||
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
|
||||
Name: proto.String(n),
|
||||
Value: proto.String(v),
|
||||
})
|
||||
}
|
||||
sort.Sort(LabelPairSorter(d.constLabelPairs))
|
||||
return d
|
||||
}
|
||||
|
||||
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
|
||||
// provided error set. If a collector returning such a descriptor is registered,
|
||||
// registration will fail with the provided error. NewInvalidDesc can be used by
|
||||
// a Collector to signal inability to describe itself.
|
||||
func NewInvalidDesc(err error) *Desc {
|
||||
return &Desc{
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Desc) String() string {
|
||||
lpStrings := make([]string, 0, len(d.constLabelPairs))
|
||||
for _, lp := range d.constLabelPairs {
|
||||
lpStrings = append(
|
||||
lpStrings,
|
||||
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
|
||||
)
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
|
||||
d.fqName,
|
||||
d.help,
|
||||
strings.Join(lpStrings, ","),
|
||||
d.variableLabels,
|
||||
)
|
||||
}
|
||||
|
||||
func checkLabelName(l string) bool {
|
||||
return labelNameRE.MatchString(l) &&
|
||||
!strings.HasPrefix(l, model.ReservedLabelPrefix)
|
||||
}
|
||||
108
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package prometheus provides embeddable metric primitives for servers and
|
||||
// standardized exposition of telemetry through a web services interface.
|
||||
//
|
||||
// All exported functions and methods are safe to be used concurrently unless
|
||||
// specified otherwise.
|
||||
//
|
||||
// To expose metrics registered with the Prometheus registry, an HTTP server
|
||||
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
//
|
||||
// As a starting point a very basic usage example:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// )
|
||||
//
|
||||
// var (
|
||||
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
// Name: "cpu_temperature_celsius",
|
||||
// Help: "Current temperature of the CPU.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// })
|
||||
// )
|
||||
//
|
||||
// func init() {
|
||||
// prometheus.MustRegister(cpuTemp)
|
||||
// prometheus.MustRegister(hdFailures)
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// cpuTemp.Set(65.3)
|
||||
// hdFailures.Inc()
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
// http.ListenAndServe(":8080", nil)
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter.
|
||||
// It also exports some stats about the HTTP usage of the /metrics
|
||||
// endpoint. (See the Handler function for more detail.)
|
||||
//
|
||||
// A more advanced metric type is the Summary.
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, and Summary, a
|
||||
// very important part of the Prometheus data model is the partitioning of
|
||||
// samples along dimensions called labels, which results in metric vectors. The
|
||||
// fundamental types are GaugeVec, CounterVec, and SummaryVec.
|
||||
//
|
||||
// Those are all the parts needed for basic usage. Detailed documentation and
|
||||
// examples are provided below.
|
||||
//
|
||||
// Everything else this package offers is essentially for "power users" only. A
|
||||
// few pointers to "power user features":
|
||||
//
|
||||
// All the various ...Opts structs have a ConstLabels field for labels that
|
||||
// never change their value (which is only useful under special circumstances,
|
||||
// see documentation of the Opts type).
|
||||
//
|
||||
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
|
||||
// not to assume anything about its type.
|
||||
//
|
||||
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
|
||||
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
|
||||
//
|
||||
// For custom metric collection, there are two entry points: Custom Metric
|
||||
// implementations and custom Collector implementations. A Metric is the
|
||||
// fundamental unit in the Prometheus data model: a sample at a point in time
|
||||
// together with its meta-data (like its fully-qualified name and any number of
|
||||
// pairs of label name and label value) that knows how to marshal itself into a
|
||||
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
|
||||
// gets registered with the Prometheus registry and manages the collection of
|
||||
// one or more Metrics. Many parts of this package are building blocks for
|
||||
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
|
||||
// metrics under the hood, and by Collectors to describe the Metrics to be
|
||||
// collected, but only to be dealt with by users if they implement their own
|
||||
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
|
||||
// in handy. Other useful components for Metric and Collector implementation
|
||||
// include: LabelPairSorter to sort the DTO version of label pairs,
|
||||
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
|
||||
// collection time, MetricVec to bundle custom Metrics into a metric vector
|
||||
// Collector, SelfCollector to make a custom Metric collect itself.
|
||||
//
|
||||
// A good example for a custom Collector is the ExpVarCollector included in this
|
||||
// package, which exports variables exported via the "expvar" package as
|
||||
// Prometheus metrics.
|
||||
package prometheus
|
||||
130
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// ClusterManager is an example for a system that might have been built without
|
||||
// Prometheus in mind. It models a central manager of jobs running in a
|
||||
// cluster. To turn it into something that collects Prometheus metrics, we
|
||||
// simply add the two methods required for the Collector interface.
|
||||
//
|
||||
// An additional challenge is that multiple instances of the ClusterManager are
|
||||
// run within the same binary, each in charge of a different zone. We need to
|
||||
// make use of ConstLabels to be able to register each ClusterManager instance
|
||||
// with Prometheus.
|
||||
type ClusterManager struct {
|
||||
Zone string
|
||||
OOMCount *prometheus.CounterVec
|
||||
RAMUsage *prometheus.GaugeVec
|
||||
mtx sync.Mutex // Protects OOMCount and RAMUsage.
|
||||
// ... many more fields
|
||||
}
|
||||
|
||||
// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
|
||||
// real cluster manager would have to do. Since it may actually be really
|
||||
// expensive, it must only be called once per collection. This implementation,
|
||||
// obviously, only returns some made-up data.
|
||||
func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
|
||||
oomCountByHost map[string]int, ramUsageByHost map[string]float64,
|
||||
) {
|
||||
// Just example fake data.
|
||||
oomCountByHost = map[string]int{
|
||||
"foo.example.org": 42,
|
||||
"bar.example.org": 2001,
|
||||
}
|
||||
ramUsageByHost = map[string]float64{
|
||||
"foo.example.org": 6.023e23,
|
||||
"bar.example.org": 3.14,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Describe faces the interesting challenge that the two metric vectors that are
|
||||
// used in this example are already Collectors themselves. However, thanks to
|
||||
// the use of channels, it is really easy to "chain" Collectors. Here we simply
|
||||
// call the Describe methods of the two metric vectors.
|
||||
func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
|
||||
c.OOMCount.Describe(ch)
|
||||
c.RAMUsage.Describe(ch)
|
||||
}
|
||||
|
||||
// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
|
||||
// sets the retrieved values in the two metric vectors and then sends all their
|
||||
// metrics to the channel (again using a chaining technique as in the Describe
|
||||
// method). Since Collect could be called multiple times concurrently, that part
|
||||
// is protected by a mutex.
|
||||
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
|
||||
oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
for host, oomCount := range oomCountByHost {
|
||||
c.OOMCount.WithLabelValues(host).Set(float64(oomCount))
|
||||
}
|
||||
for host, ramUsage := range ramUsageByHost {
|
||||
c.RAMUsage.WithLabelValues(host).Set(ramUsage)
|
||||
}
|
||||
c.OOMCount.Collect(ch)
|
||||
c.RAMUsage.Collect(ch)
|
||||
// All metrics in OOMCount and RAMUsage are sent to the channel now. We
|
||||
// can safely reset the two metric vectors now, so that we can start
|
||||
// fresh in the next Collect cycle. (Imagine a host disappears from the
|
||||
// cluster. If we did not reset here, its Metric would stay in the
|
||||
// metric vectors forever.)
|
||||
c.OOMCount.Reset()
|
||||
c.RAMUsage.Reset()
|
||||
}
|
||||
|
||||
// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note
|
||||
// that the zone is set as a ConstLabel. (It's different in each instance of the
|
||||
// ClusterManager, but constant over the lifetime of an instance.) The reported
|
||||
// values are partitioned by host, which is therefore a variable label.
|
||||
func NewClusterManager(zone string) *ClusterManager {
|
||||
return &ClusterManager{
|
||||
Zone: zone,
|
||||
OOMCount: prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: "clustermanager",
|
||||
Name: "oom_count",
|
||||
Help: "number of OOM crashes",
|
||||
ConstLabels: prometheus.Labels{"zone": zone},
|
||||
},
|
||||
[]string{"host"},
|
||||
),
|
||||
RAMUsage: prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Subsystem: "clustermanager",
|
||||
Name: "ram_usage_bytes",
|
||||
Help: "RAM usage as reported to the cluster manager",
|
||||
ConstLabels: prometheus.Labels{"zone": zone},
|
||||
},
|
||||
[]string{"host"},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleCollector_clustermanager() {
|
||||
workerDB := NewClusterManager("db")
|
||||
workerCA := NewClusterManager("ca")
|
||||
prometheus.MustRegister(workerDB)
|
||||
prometheus.MustRegister(workerCA)
|
||||
|
||||
// Since we are dealing with custom Collector implementations, it might
|
||||
// be a good idea to enable the collect checks in the registry.
|
||||
prometheus.EnableCollectChecks(true)
|
||||
}
|
||||
87
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
allocDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", "memstats", "alloc_bytes"),
|
||||
"bytes allocated and still in use",
|
||||
nil, nil,
|
||||
)
|
||||
totalAllocDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", "memstats", "total_alloc_bytes"),
|
||||
"bytes allocated (even if freed)",
|
||||
nil, nil,
|
||||
)
|
||||
numGCDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", "memstats", "num_gc_total"),
|
||||
"number of GCs run",
|
||||
nil, nil,
|
||||
)
|
||||
)
|
||||
|
||||
// MemStatsCollector is an example for a custom Collector that solves the
|
||||
// problem of feeding into multiple metrics at the same time. The
|
||||
// runtime.ReadMemStats should happen only once, and then the results need to be
|
||||
// fed into a number of separate Metrics. In this example, only a few of the
|
||||
// values reported by ReadMemStats are used. For each, there is a Desc provided
|
||||
// as a var, so the MemStatsCollector itself needs nothing else in the
|
||||
// struct. Only the methods need to be implemented.
|
||||
type MemStatsCollector struct{}
|
||||
|
||||
// Describe just sends the three Desc objects for the Metrics we intend to
|
||||
// collect.
|
||||
func (_ MemStatsCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- allocDesc
|
||||
ch <- totalAllocDesc
|
||||
ch <- numGCDesc
|
||||
}
|
||||
|
||||
// Collect does the trick by calling ReadMemStats once and then constructing
|
||||
// three different Metrics on the fly.
|
||||
func (_ MemStatsCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
var ms runtime.MemStats
|
||||
runtime.ReadMemStats(&ms)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
allocDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(ms.Alloc),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
totalAllocDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(ms.TotalAlloc),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
numGCDesc,
|
||||
prometheus.CounterValue,
|
||||
float64(ms.NumGC),
|
||||
)
|
||||
// To avoid new allocations on each collection, you could also keep
|
||||
// metric objects around and return the same objects each time, just
|
||||
// with new values set.
|
||||
}
|
||||
|
||||
func ExampleCollector_memstats() {
|
||||
prometheus.MustRegister(&MemStatsCollector{})
|
||||
// Since we are dealing with custom Collector implementations, it might
|
||||
// be a good idea to enable the collect checks in the registry.
|
||||
prometheus.EnableCollectChecks(true)
|
||||
}
|
||||
69
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func NewCallbackMetric(desc *prometheus.Desc, callback func() float64) *CallbackMetric {
|
||||
result := &CallbackMetric{desc: desc, callback: callback}
|
||||
result.Init(result) // Initialize the SelfCollector.
|
||||
return result
|
||||
}
|
||||
|
||||
// TODO: Come up with a better example.
|
||||
|
||||
// CallbackMetric is an example for a user-defined Metric that exports the
|
||||
// result of a function call as a metric of type "untyped" without any
|
||||
// labels. It uses SelfCollector to turn the Metric into a Collector so that it
|
||||
// can be registered with Prometheus.
|
||||
//
|
||||
// Note that this example is pretty much academic as the prometheus package
|
||||
// already provides an UntypedFunc type.
|
||||
type CallbackMetric struct {
|
||||
prometheus.SelfCollector
|
||||
|
||||
desc *prometheus.Desc
|
||||
callback func() float64
|
||||
}
|
||||
|
||||
func (cm *CallbackMetric) Desc() *prometheus.Desc {
|
||||
return cm.desc
|
||||
}
|
||||
|
||||
func (cm *CallbackMetric) Write(m *dto.Metric) error {
|
||||
m.Untyped = &dto.Untyped{Value: proto.Float64(cm.callback())}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ExampleSelfCollector() {
|
||||
m := NewCallbackMetric(
|
||||
prometheus.NewDesc(
|
||||
"runtime_goroutines_count",
|
||||
"Total number of goroutines that currently exist.",
|
||||
nil, nil, // No labels, these must be nil.
|
||||
),
|
||||
func() float64 {
|
||||
return float64(runtime.NumGoroutine())
|
||||
},
|
||||
)
|
||||
prometheus.MustRegister(m)
|
||||
}
|
||||
454
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go
generated
vendored
Normal file
@@ -0,0 +1,454 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus_test
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func ExampleGauge() {
|
||||
opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "our_company",
|
||||
Subsystem: "blob_storage",
|
||||
Name: "ops_queued",
|
||||
Help: "Number of blob storage operations waiting to be processed.",
|
||||
})
|
||||
prometheus.MustRegister(opsQueued)
|
||||
|
||||
// 10 operations queued by the goroutine managing incoming requests.
|
||||
opsQueued.Add(10)
|
||||
// A worker goroutine has picked up a waiting operation.
|
||||
opsQueued.Dec()
|
||||
// And once more...
|
||||
opsQueued.Dec()
|
||||
}
|
||||
|
||||
func ExampleGaugeVec() {
|
||||
binaryVersion := flag.String("binary_version", "debug", "Version of the binary: debug, canary, production.")
|
||||
flag.Parse()
|
||||
|
||||
opsQueued := prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "our_company",
|
||||
Subsystem: "blob_storage",
|
||||
Name: "ops_queued",
|
||||
Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.",
|
||||
ConstLabels: prometheus.Labels{"binary_version": *binaryVersion},
|
||||
},
|
||||
[]string{
|
||||
// Which user has requested the operation?
|
||||
"user",
|
||||
// Of what type is the operation?
|
||||
"type",
|
||||
},
|
||||
)
|
||||
prometheus.MustRegister(opsQueued)
|
||||
|
||||
// Increase a value using compact (but order-sensitive!) WithLabelValues().
|
||||
opsQueued.WithLabelValues("bob", "put").Add(4)
|
||||
// Increase a value with a map using WithLabels. More verbose, but order
|
||||
// doesn't matter anymore.
|
||||
opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc()
|
||||
}
|
||||
|
||||
func ExampleGaugeFunc() {
|
||||
if err := prometheus.Register(prometheus.NewGaugeFunc(
|
||||
prometheus.GaugeOpts{
|
||||
Subsystem: "runtime",
|
||||
Name: "goroutines_count",
|
||||
Help: "Number of goroutines that currently exist.",
|
||||
},
|
||||
func() float64 { return float64(runtime.NumGoroutine()) },
|
||||
)); err == nil {
|
||||
fmt.Println("GaugeFunc 'goroutines_count' registered.")
|
||||
}
|
||||
// Note that the count of goroutines is a gauge (and not a counter) as
|
||||
// it can go up and down.
|
||||
|
||||
// Output:
|
||||
// GaugeFunc 'goroutines_count' registered.
|
||||
}
|
||||
|
||||
func ExampleCounter() {
|
||||
pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "repository_pushes", // Note: No help string...
|
||||
})
|
||||
err := prometheus.Register(pushCounter) // ... so this will return an error.
|
||||
if err != nil {
|
||||
fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Try it once more, this time with a help string.
|
||||
pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "repository_pushes",
|
||||
Help: "Number of pushes to external repository.",
|
||||
})
|
||||
err = prometheus.Register(pushCounter)
|
||||
if err != nil {
|
||||
fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
|
||||
return
|
||||
}
|
||||
|
||||
pushComplete := make(chan struct{})
|
||||
// TODO: Start a goroutine that performs repository pushes and reports
|
||||
// each completion via the channel.
|
||||
for _ = range pushComplete {
|
||||
pushCounter.Inc()
|
||||
}
|
||||
// Output:
|
||||
// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
|
||||
}
|
||||
|
||||
func ExampleCounterVec() {
|
||||
binaryVersion := flag.String("environment", "test", "Execution environment: test, staging, production.")
|
||||
flag.Parse()
|
||||
|
||||
httpReqs := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "http_requests_total",
|
||||
Help: "How many HTTP requests processed, partitioned by status code and http method.",
|
||||
ConstLabels: prometheus.Labels{"env": *binaryVersion},
|
||||
},
|
||||
[]string{"code", "method"},
|
||||
)
|
||||
prometheus.MustRegister(httpReqs)
|
||||
|
||||
httpReqs.WithLabelValues("404", "POST").Add(42)
|
||||
|
||||
// If you have to access the same set of labels very frequently, it
|
||||
// might be good to retrieve the metric only once and keep a handle to
|
||||
// it. But beware of deletion of that metric, see below!
|
||||
m := httpReqs.WithLabelValues("200", "GET")
|
||||
for i := 0; i < 1000000; i++ {
|
||||
m.Inc()
|
||||
}
|
||||
// Delete a metric from the vector. If you have previously kept a handle
|
||||
// to that metric (as above), future updates via that handle will go
|
||||
// unseen (even if you re-create a metric with the same label set
|
||||
// later).
|
||||
httpReqs.DeleteLabelValues("200", "GET")
|
||||
// Same thing with the more verbose Labels syntax.
|
||||
httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"})
|
||||
}
|
||||
|
||||
func ExampleInstrumentHandler() {
|
||||
// Handle the "/doc" endpoint with the standard http.FileServer handler.
|
||||
// By wrapping the handler with InstrumentHandler, request count,
|
||||
// request and response sizes, and request latency are automatically
|
||||
// exported to Prometheus, partitioned by HTTP status code and method
|
||||
// and by the handler name (here "fileserver").
|
||||
http.Handle("/doc", prometheus.InstrumentHandler(
|
||||
"fileserver", http.FileServer(http.Dir("/usr/share/doc")),
|
||||
))
|
||||
// The Prometheus handler still has to be registered to handle the
|
||||
// "/metrics" endpoint. The handler returned by prometheus.Handler() is
|
||||
// already instrumented - with "prometheus" as the handler name. In this
|
||||
// example, we want the handler name to be "metrics", so we instrument
|
||||
// the uninstrumented Prometheus handler ourselves.
|
||||
http.Handle("/metrics", prometheus.InstrumentHandler(
|
||||
"metrics", prometheus.UninstrumentedHandler(),
|
||||
))
|
||||
}
|
||||
|
||||
func ExampleLabelPairSorter() {
|
||||
labelPairs := []*dto.LabelPair{
|
||||
&dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")},
|
||||
&dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")},
|
||||
}
|
||||
|
||||
sort.Sort(prometheus.LabelPairSorter(labelPairs))
|
||||
|
||||
fmt.Println(labelPairs)
|
||||
// Output:
|
||||
// [name:"method" value:"get" name:"status" value:"404" ]
|
||||
}
|
||||
|
||||
func ExampleRegister() {
|
||||
// Imagine you have a worker pool and want to count the tasks completed.
|
||||
taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Subsystem: "worker_pool",
|
||||
Name: "completed_tasks_total",
|
||||
Help: "Total number of tasks completed.",
|
||||
})
|
||||
// This will register fine.
|
||||
if err := prometheus.Register(taskCounter); err != nil {
|
||||
fmt.Println(err)
|
||||
} else {
|
||||
fmt.Println("taskCounter registered.")
|
||||
}
|
||||
// Don't forget to tell the HTTP server about the Prometheus handler.
|
||||
// (In a real program, you still need to start the http server...)
|
||||
http.Handle("/metrics", prometheus.Handler())
|
||||
|
||||
// Now you can start workers and give every one of them a pointer to
|
||||
// taskCounter and let it increment it whenever it completes a task.
|
||||
taskCounter.Inc() // This has to happen somewhere in the worker code.
|
||||
|
||||
// But wait, you want to see how individual workers perform. So you need
|
||||
// a vector of counters, with one element for each worker.
|
||||
taskCounterVec := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: "worker_pool",
|
||||
Name: "completed_tasks_total",
|
||||
Help: "Total number of tasks completed.",
|
||||
},
|
||||
[]string{"worker_id"},
|
||||
)
|
||||
|
||||
// Registering will fail because we already have a metric of that name.
|
||||
if err := prometheus.Register(taskCounterVec); err != nil {
|
||||
fmt.Println("taskCounterVec not registered:", err)
|
||||
} else {
|
||||
fmt.Println("taskCounterVec registered.")
|
||||
}
|
||||
|
||||
// To fix, first unregister the old taskCounter.
|
||||
if prometheus.Unregister(taskCounter) {
|
||||
fmt.Println("taskCounter unregistered.")
|
||||
}
|
||||
|
||||
// Try registering taskCounterVec again.
|
||||
if err := prometheus.Register(taskCounterVec); err != nil {
|
||||
fmt.Println("taskCounterVec not registered:", err)
|
||||
} else {
|
||||
fmt.Println("taskCounterVec registered.")
|
||||
}
|
||||
// Bummer! Still doesn't work.
|
||||
|
||||
// Prometheus will not allow you to ever export metrics with
|
||||
// inconsistent help strings or label names. After unregistering, the
|
||||
// unregistered metrics will cease to show up in the /metrics http
|
||||
// response, but the registry still remembers that those metrics had
|
||||
// been exported before. For this example, we will now choose a
|
||||
// different name. (In a real program, you would obviously not export
|
||||
// the obsolete metric in the first place.)
|
||||
taskCounterVec = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: "worker_pool",
|
||||
Name: "completed_tasks_by_id",
|
||||
Help: "Total number of tasks completed.",
|
||||
},
|
||||
[]string{"worker_id"},
|
||||
)
|
||||
if err := prometheus.Register(taskCounterVec); err != nil {
|
||||
fmt.Println("taskCounterVec not registered:", err)
|
||||
} else {
|
||||
fmt.Println("taskCounterVec registered.")
|
||||
}
|
||||
// Finally it worked!
|
||||
|
||||
// The workers have to tell taskCounterVec their id to increment the
|
||||
// right element in the metric vector.
|
||||
taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.
|
||||
|
||||
// Each worker could also keep a reference to their own counter element
|
||||
// around. Pick the counter at initialization time of the worker.
|
||||
myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
|
||||
myCounter.Inc() // Somewhere in the code of that worker.
|
||||
|
||||
// Note that something like WithLabelValues("42", "spurious arg") would
|
||||
// panic (because you have provided too many label values). If you want
|
||||
// to get an error instead, use GetMetricWithLabelValues(...) instead.
|
||||
notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
|
||||
if err != nil {
|
||||
fmt.Println("Worker initialization failed:", err)
|
||||
}
|
||||
if notMyCounter == nil {
|
||||
fmt.Println("notMyCounter is nil.")
|
||||
}
|
||||
|
||||
// A different (and somewhat tricky) approach is to use
|
||||
// ConstLabels. ConstLabels are pairs of label names and label values
|
||||
// that never change. You might ask what those labels are good for (and
|
||||
// rightfully so - if they never change, they could as well be part of
|
||||
// the metric name). There are essentially two use-cases: The first is
|
||||
// if labels are constant throughout the lifetime of a binary execution,
|
||||
// but they vary over time or between different instances of a running
|
||||
// binary. The second is what we have here: Each worker creates and
|
||||
// registers its own Counter instance where the only difference is in the
|
||||
// value of the ConstLabels. Those Counters can all be registered
|
||||
// because the different ConstLabel values guarantee that each worker
|
||||
// will increment a different Counter metric.
|
||||
counterOpts := prometheus.CounterOpts{
|
||||
Subsystem: "worker_pool",
|
||||
Name: "completed_tasks",
|
||||
Help: "Total number of tasks completed.",
|
||||
ConstLabels: prometheus.Labels{"worker_id": "42"},
|
||||
}
|
||||
taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
|
||||
if err := prometheus.Register(taskCounterForWorker42); err != nil {
|
||||
fmt.Println("taskCounterVForWorker42 not registered:", err)
|
||||
} else {
|
||||
fmt.Println("taskCounterForWorker42 registered.")
|
||||
}
|
||||
// Obviously, in real code, taskCounterForWorker42 would be a member
|
||||
// variable of a worker struct, and the "42" would be retrieved with a
|
||||
// GetId() method or something. The Counter would be created and
|
||||
// registered in the initialization code of the worker.
|
||||
|
||||
// For the creation of the next Counter, we can recycle
|
||||
// counterOpts. Just change the ConstLabels.
|
||||
counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
|
||||
taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
|
||||
if err := prometheus.Register(taskCounterForWorker2001); err != nil {
|
||||
fmt.Println("taskCounterVForWorker2001 not registered:", err)
|
||||
} else {
|
||||
fmt.Println("taskCounterForWorker2001 registered.")
|
||||
}
|
||||
|
||||
taskCounterForWorker2001.Inc()
|
||||
taskCounterForWorker42.Inc()
|
||||
taskCounterForWorker2001.Inc()
|
||||
|
||||
// Yet another approach would be to turn the workers themselves into
|
||||
// Collectors and register them. See the Collector example for details.
|
||||
|
||||
// Output:
|
||||
// taskCounter registered.
|
||||
// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
|
||||
// taskCounter unregistered.
|
||||
// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
|
||||
// taskCounterVec registered.
|
||||
// Worker initialization failed: inconsistent label cardinality
|
||||
// notMyCounter is nil.
|
||||
// taskCounterForWorker42 registered.
|
||||
// taskCounterForWorker2001 registered.
|
||||
}
|
||||
|
||||
func ExampleSummary() {
|
||||
temps := prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "pond_temperature_celsius",
|
||||
Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
|
||||
})
|
||||
|
||||
// Simulate some observations.
|
||||
for i := 0; i < 1000; i++ {
|
||||
temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
|
||||
}
|
||||
|
||||
// Just for demonstration, let's check the state of the summary by
|
||||
// (ab)using its Write method (which is usually only used by Prometheus
|
||||
// internally).
|
||||
metric := &dto.Metric{}
|
||||
temps.Write(metric)
|
||||
fmt.Println(proto.MarshalTextString(metric))
|
||||
|
||||
// Output:
|
||||
// summary: <
|
||||
// sample_count: 1000
|
||||
// sample_sum: 29969.50000000001
|
||||
// quantile: <
|
||||
// quantile: 0.5
|
||||
// value: 31.1
|
||||
// >
|
||||
// quantile: <
|
||||
// quantile: 0.9
|
||||
// value: 41.3
|
||||
// >
|
||||
// quantile: <
|
||||
// quantile: 0.99
|
||||
// value: 41.9
|
||||
// >
|
||||
// >
|
||||
}
|
||||
|
||||
func ExampleSummaryVec() {
|
||||
temps := prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "pond_temperature_celsius",
|
||||
Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
|
||||
},
|
||||
[]string{"species"},
|
||||
)
|
||||
|
||||
// Simulate some observations.
|
||||
for i := 0; i < 1000; i++ {
|
||||
temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
|
||||
temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
|
||||
}
|
||||
|
||||
// Just for demonstration, let's check the state of the summary vector
|
||||
// by (ab)using its Collect method and the Write method of its elements
|
||||
// (which is usually only used by Prometheus internally - code like the
|
||||
// following will never appear in your own code).
|
||||
metricChan := make(chan prometheus.Metric)
|
||||
go func() {
|
||||
defer close(metricChan)
|
||||
temps.Collect(metricChan)
|
||||
}()
|
||||
|
||||
metricStrings := []string{}
|
||||
for metric := range metricChan {
|
||||
dtoMetric := &dto.Metric{}
|
||||
metric.Write(dtoMetric)
|
||||
metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric))
|
||||
}
|
||||
sort.Strings(metricStrings) // For reproducible print order.
|
||||
fmt.Println(metricStrings)
|
||||
|
||||
// Output:
|
||||
// [label: <
|
||||
// name: "species"
|
||||
// value: "lithobates-catesbeianus"
|
||||
// >
|
||||
// summary: <
|
||||
// sample_count: 1000
|
||||
// sample_sum: 31956.100000000017
|
||||
// quantile: <
|
||||
// quantile: 0.5
|
||||
// value: 32.4
|
||||
// >
|
||||
// quantile: <
|
||||
// quantile: 0.9
|
||||
// value: 41.4
|
||||
// >
|
||||
// quantile: <
|
||||
// quantile: 0.99
|
||||
// value: 41.9
|
||||
// >
|
||||
// >
|
||||
// label: <
|
||||
// name: "species"
|
||||
// value: "litoria-caerulea"
|
||||
// >
|
||||
// summary: <
|
||||
// sample_count: 1000
|
||||
// sample_sum: 29969.50000000001
|
||||
// quantile: <
|
||||
// quantile: 0.5
|
||||
// value: 31.1
|
||||
// >
|
||||
// quantile: <
|
||||
// quantile: 0.9
|
||||
// value: 41.3
|
||||
// >
|
||||
// quantile: <
|
||||
// quantile: 0.99
|
||||
// value: 41.9
|
||||
// >
|
||||
// >
|
||||
// ]
|
||||
}
|
||||
119
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"expvar"
|
||||
)
|
||||
|
||||
// ExpvarCollector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the ExpvarCollector is inherently
|
||||
// slow. Thus, the ExpvarCollector is probably great for experiments and
|
||||
// prototyping, but you should seriously consider a more direct implementation of
|
||||
// Prometheus metrics for monitoring production systems.
|
||||
//
|
||||
// Use NewExpvarCollector to create new instances.
|
||||
type ExpvarCollector struct {
|
||||
exports map[string]*Desc
|
||||
}
|
||||
|
||||
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
|
||||
// to be registered with the Prometheus registry.
|
||||
//
|
||||
// The exports map has the following meaning:
|
||||
//
|
||||
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
|
||||
// want to export as Prometheus metric, you need an entry in the exports
|
||||
// map. The descriptor mapped to each key describes how to export the expvar
|
||||
// value. It defines the name and the help string of the Prometheus metric
|
||||
// proxying the expvar value. The type will always be Untyped.
|
||||
//
|
||||
// For descriptors without variable labels, the expvar value must be a number or
|
||||
// a bool. The number is then directly exported as the Prometheus sample
|
||||
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
|
||||
// that are not numbers or bools are silently ignored.
|
||||
//
|
||||
// If the descriptor has one variable label, the expvar value must be an expvar
|
||||
// map. The keys in the expvar map become the various values of the one
|
||||
// Prometheus label. The values in the expvar map must be numbers or bools again
|
||||
// as above.
|
||||
//
|
||||
// For descriptors with more than one variable label, the expvar must be a
|
||||
// nested expvar map, i.e. where the values of the topmost map are maps again
|
||||
// etc. until a depth is reached that corresponds to the number of labels. The
|
||||
// leaves of that structure must be numbers or bools as above to serve as the
|
||||
// sample values.
|
||||
//
|
||||
// Anything that does not fit into the scheme above is silently ignored.
|
||||
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
|
||||
return &ExpvarCollector{
|
||||
exports: exports,
|
||||
}
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
|
||||
for _, desc := range e.exports {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
|
||||
for name, desc := range e.exports {
|
||||
var m Metric
|
||||
expVar := expvar.Get(name)
|
||||
if expVar == nil {
|
||||
continue
|
||||
}
|
||||
var v interface{}
|
||||
labels := make([]string, len(desc.variableLabels))
|
||||
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
|
||||
ch <- NewInvalidMetric(desc, err)
|
||||
continue
|
||||
}
|
||||
var processValue func(v interface{}, i int)
|
||||
processValue = func(v interface{}, i int) {
|
||||
if i >= len(labels) {
|
||||
copiedLabels := append(make([]string, 0, len(labels)), labels...)
|
||||
switch v := v.(type) {
|
||||
case float64:
|
||||
m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
|
||||
case bool:
|
||||
if v {
|
||||
m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
|
||||
} else {
|
||||
m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
|
||||
}
|
||||
default:
|
||||
return
|
||||
}
|
||||
ch <- m
|
||||
return
|
||||
}
|
||||
vm, ok := v.(map[string]interface{})
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for lv, val := range vm {
|
||||
labels[i] = lv
|
||||
processValue(val, i+1)
|
||||
}
|
||||
}
|
||||
processValue(v, 0)
|
||||
}
|
||||
}
|
||||
97
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus_test
|
||||
|
||||
import (
|
||||
"expvar"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func ExampleExpvarCollector() {
|
||||
expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
|
||||
"memstats": prometheus.NewDesc(
|
||||
"expvar_memstats",
|
||||
"All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
|
||||
[]string{"type"}, nil,
|
||||
),
|
||||
"lone-int": prometheus.NewDesc(
|
||||
"expvar_lone_int",
|
||||
"Just an expvar int as an example.",
|
||||
nil, nil,
|
||||
),
|
||||
"http-request-map": prometheus.NewDesc(
|
||||
"expvar_http_request_total",
|
||||
"How many http requests processed, partitioned by status code and http method.",
|
||||
[]string{"code", "method"}, nil,
|
||||
),
|
||||
})
|
||||
prometheus.MustRegister(expvarCollector)
|
||||
|
||||
// The Prometheus part is done here. But to show that this example is
|
||||
// doing anything, we have to manually export something via expvar. In
|
||||
// real-life use-cases, some library would already have exported via
|
||||
// expvar what we want to re-export as Prometheus metrics.
|
||||
expvar.NewInt("lone-int").Set(42)
|
||||
expvarMap := expvar.NewMap("http-request-map")
|
||||
var (
|
||||
expvarMap1, expvarMap2 expvar.Map
|
||||
expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
|
||||
)
|
||||
expvarMap1.Init()
|
||||
expvarMap2.Init()
|
||||
expvarInt11.Set(3)
|
||||
expvarInt12.Set(13)
|
||||
expvarInt21.Set(11)
|
||||
expvarInt22.Set(212)
|
||||
expvarMap1.Set("POST", &expvarInt11)
|
||||
expvarMap1.Set("GET", &expvarInt12)
|
||||
expvarMap2.Set("POST", &expvarInt21)
|
||||
expvarMap2.Set("GET", &expvarInt22)
|
||||
expvarMap.Set("404", &expvarMap1)
|
||||
expvarMap.Set("200", &expvarMap2)
|
||||
// Results in the following expvar map:
|
||||
// "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
|
||||
|
||||
// Let's see what the scrape would yield, but exclude the memstats metrics.
|
||||
metricStrings := []string{}
|
||||
metric := dto.Metric{}
|
||||
metricChan := make(chan prometheus.Metric)
|
||||
go func() {
|
||||
expvarCollector.Collect(metricChan)
|
||||
close(metricChan)
|
||||
}()
|
||||
for m := range metricChan {
|
||||
if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
|
||||
metric.Reset()
|
||||
m.Write(&metric)
|
||||
metricStrings = append(metricStrings, metric.String())
|
||||
}
|
||||
}
|
||||
sort.Strings(metricStrings)
|
||||
for _, s := range metricStrings {
|
||||
fmt.Println(strings.TrimRight(s, " "))
|
||||
}
|
||||
// Output:
|
||||
// label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
|
||||
// label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
|
||||
// label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
|
||||
// label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
|
||||
// untyped:<value:42 >
|
||||
}
|
||||
147
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Gauge is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
// A Gauge is typically used for measured values like temperatures or current
|
||||
// memory usage, but also "counts" that can go up and down, like the number of
|
||||
// running goroutines.
|
||||
//
|
||||
// To create Gauge instances, use NewGauge.
|
||||
type Gauge interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Set sets the Gauge to an arbitrary value.
|
||||
Set(float64)
|
||||
// Inc increments the Gauge by 1.
|
||||
Inc()
|
||||
// Dec decrements the Gauge by 1.
|
||||
Dec()
|
||||
// Add adds the given value to the Gauge. (The value can be
|
||||
// negative, resulting in a decrease of the Gauge.)
|
||||
Add(float64)
|
||||
// Sub subtracts the given value from the Gauge. (The value can be
|
||||
// negative, resulting in an increase of the Gauge.)
|
||||
Sub(float64)
|
||||
}
|
||||
|
||||
// GaugeOpts is an alias for Opts. See there for doc comments.
|
||||
type GaugeOpts Opts
|
||||
|
||||
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
||||
func NewGauge(opts GaugeOpts) Gauge {
|
||||
return newValue(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), GaugeValue, 0)
|
||||
}
|
||||
|
||||
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
|
||||
// Desc, but have different values for their variable labels. This is used if
|
||||
// you want to count the same thing partitioned by various dimensions
|
||||
// (e.g. number of operations queued, partitioned by user and operation
|
||||
// type). Create instances with NewGaugeVec.
|
||||
type GaugeVec struct {
|
||||
MetricVec
|
||||
}
|
||||
|
||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &GaugeVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Gauge and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Gauge), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Gauge and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Gauge), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Gauge)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (m *GaugeVec) With(labels Labels) Gauge {
|
||||
return m.MetricVec.With(labels).(Gauge)
|
||||
}
|
||||
|
||||
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
|
||||
// provided function.
|
||||
//
|
||||
// To create GaugeFunc instances, use NewGaugeFunc.
|
||||
type GaugeFunc interface {
|
||||
Metric
|
||||
Collector
|
||||
}
|
||||
|
||||
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
||||
// value reported is determined by calling the given function from within the
|
||||
// Write method. Take into account that metric collection may happen
|
||||
// concurrently. If that results in concurrent calls to Write, like in the case
|
||||
// where a GaugeFunc is directly registered with Prometheus, the provided
|
||||
// function must be concurrency-safe.
|
||||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), GaugeValue, function)
|
||||
}
|
||||
182
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func listenGaugeStream(vals, result chan float64, done chan struct{}) {
|
||||
var sum float64
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
close(vals)
|
||||
for v := range vals {
|
||||
sum += v
|
||||
}
|
||||
break outer
|
||||
case v := <-vals:
|
||||
sum += v
|
||||
}
|
||||
}
|
||||
result <- sum
|
||||
close(result)
|
||||
}
|
||||
|
||||
func TestGaugeConcurrency(t *testing.T) {
|
||||
it := func(n uint32) bool {
|
||||
mutations := int(n % 10000)
|
||||
concLevel := int(n%15 + 1)
|
||||
|
||||
var start, end sync.WaitGroup
|
||||
start.Add(1)
|
||||
end.Add(concLevel)
|
||||
|
||||
sStream := make(chan float64, mutations*concLevel)
|
||||
result := make(chan float64)
|
||||
done := make(chan struct{})
|
||||
|
||||
go listenGaugeStream(sStream, result, done)
|
||||
go func() {
|
||||
end.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
gge := NewGauge(GaugeOpts{
|
||||
Name: "test_gauge",
|
||||
Help: "no help can be found here",
|
||||
})
|
||||
for i := 0; i < concLevel; i++ {
|
||||
vals := make([]float64, mutations)
|
||||
for j := 0; j < mutations; j++ {
|
||||
vals[j] = rand.Float64() - 0.5
|
||||
}
|
||||
|
||||
go func(vals []float64) {
|
||||
start.Wait()
|
||||
for _, v := range vals {
|
||||
sStream <- v
|
||||
gge.Add(v)
|
||||
}
|
||||
end.Done()
|
||||
}(vals)
|
||||
}
|
||||
start.Done()
|
||||
|
||||
if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
|
||||
t.Fatalf("expected approx. %f, got %f", expected, got)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if err := quick.Check(it, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGaugeVecConcurrency(t *testing.T) {
|
||||
it := func(n uint32) bool {
|
||||
mutations := int(n % 10000)
|
||||
concLevel := int(n%15 + 1)
|
||||
vecLength := int(n%5 + 1)
|
||||
|
||||
var start, end sync.WaitGroup
|
||||
start.Add(1)
|
||||
end.Add(concLevel)
|
||||
|
||||
sStreams := make([]chan float64, vecLength)
|
||||
results := make([]chan float64, vecLength)
|
||||
done := make(chan struct{})
|
||||
|
||||
for i := 0; i < vecLength; i++ {
|
||||
sStreams[i] = make(chan float64, mutations*concLevel)
|
||||
results[i] = make(chan float64)
|
||||
go listenGaugeStream(sStreams[i], results[i], done)
|
||||
}
|
||||
|
||||
go func() {
|
||||
end.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
gge := NewGaugeVec(
|
||||
GaugeOpts{
|
||||
Name: "test_gauge",
|
||||
Help: "no help can be found here",
|
||||
},
|
||||
[]string{"label"},
|
||||
)
|
||||
for i := 0; i < concLevel; i++ {
|
||||
vals := make([]float64, mutations)
|
||||
pick := make([]int, mutations)
|
||||
for j := 0; j < mutations; j++ {
|
||||
vals[j] = rand.Float64() - 0.5
|
||||
pick[j] = rand.Intn(vecLength)
|
||||
}
|
||||
|
||||
go func(vals []float64) {
|
||||
start.Wait()
|
||||
for i, v := range vals {
|
||||
sStreams[pick[i]] <- v
|
||||
gge.WithLabelValues(string('A' + pick[i])).Add(v)
|
||||
}
|
||||
end.Done()
|
||||
}(vals)
|
||||
}
|
||||
start.Done()
|
||||
|
||||
for i := range sStreams {
|
||||
if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
|
||||
t.Fatalf("expected approx. %f, got %f", expected, got)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if err := quick.Check(it, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGaugeFunc(t *testing.T) {
|
||||
gf := NewGaugeFunc(
|
||||
GaugeOpts{
|
||||
Name: "test_name",
|
||||
Help: "test help",
|
||||
ConstLabels: Labels{"a": "1", "b": "2"},
|
||||
},
|
||||
func() float64 { return 3.1415 },
|
||||
)
|
||||
|
||||
if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got {
|
||||
t.Errorf("expected %q, got %q", expected, got)
|
||||
}
|
||||
|
||||
m := &dto.Metric{}
|
||||
gf.Write(m)
|
||||
|
||||
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got {
|
||||
t.Errorf("expected %q, got %q", expected, got)
|
||||
}
|
||||
}
|
||||
31
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type goCollector struct {
|
||||
goroutines Gauge
|
||||
}
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
func NewGoCollector() *goCollector {
|
||||
return &goCollector{
|
||||
goroutines: NewGauge(GaugeOpts{
|
||||
Name: "process_goroutines",
|
||||
Help: "Number of goroutines that currently exist.",
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.goroutines.Desc()
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||
c.goroutines.Set(float64(runtime.NumGoroutine()))
|
||||
ch <- c.goroutines
|
||||
}
|
||||
58
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func TestGoCollector(t *testing.T) {
|
||||
var (
|
||||
c = NewGoCollector()
|
||||
ch = make(chan Metric)
|
||||
waitc = make(chan struct{})
|
||||
closec = make(chan struct{})
|
||||
old = -1
|
||||
)
|
||||
defer close(closec)
|
||||
|
||||
go func() {
|
||||
c.Collect(ch)
|
||||
go func(c <-chan struct{}) {
|
||||
<-c
|
||||
}(closec)
|
||||
<-waitc
|
||||
c.Collect(ch)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case metric := <-ch:
|
||||
switch m := metric.(type) {
|
||||
// Attention, this also catches Counter...
|
||||
case Gauge:
|
||||
pb := &dto.Metric{}
|
||||
m.Write(pb)
|
||||
|
||||
if old == -1 {
|
||||
old = int(pb.GetGauge().GetValue())
|
||||
close(waitc)
|
||||
continue
|
||||
}
|
||||
|
||||
if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
|
||||
// TODO: This is flaky in highly concurrent situations.
|
||||
t.Errorf("want 1 new goroutine, got %d", diff)
|
||||
}
|
||||
|
||||
return
|
||||
default:
|
||||
t.Errorf("want type Gauge, got %s", reflect.TypeOf(metric))
|
||||
}
|
||||
case <-time.After(1 * time.Second):
|
||||
t.Fatalf("expected collect timed out")
|
||||
}
|
||||
}
|
||||
}
|
||||
322
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
Normal file
@@ -0,0 +1,322 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var instLabels = []string{"method", "code"}
|
||||
|
||||
type nower interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
type nowFunc func() time.Time
|
||||
|
||||
func (n nowFunc) Now() time.Time {
|
||||
return n()
|
||||
}
|
||||
|
||||
var now nower = nowFunc(func() time.Time {
|
||||
return time.Now()
|
||||
})
|
||||
|
||||
func nowSeries(t ...time.Time) nower {
|
||||
return nowFunc(func() time.Time {
|
||||
defer func() {
|
||||
t = t[1:]
|
||||
}()
|
||||
|
||||
return t[0]
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
||||
// registers four metric collectors (if not already done) and reports http
|
||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
||||
// (CounterVec), http_request_duration_microseconds (Summary),
|
||||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
||||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(
|
||||
SummaryOpts{
|
||||
Subsystem: "http",
|
||||
ConstLabels: Labels{"handler": handlerName},
|
||||
},
|
||||
handlerFunc,
|
||||
)
|
||||
}
|
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
|
||||
// flexibility (at the cost of a more complex call syntax). As
|
||||
// InstrumentHandler, this function registers four metric collectors, but it
|
||||
// uses the provided SummaryOpts to create them. However, the fields "Name" and
|
||||
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
|
||||
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
|
||||
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
||||
// behavior of InstrumentHandler:
|
||||
//
|
||||
// prometheus.InstrumentHandlerWithOpts(
|
||||
// prometheus.SummaryOpts{
|
||||
// Subsystem: "http",
|
||||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
||||
// },
|
||||
// handler,
|
||||
// )
|
||||
//
|
||||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
|
||||
// more flexibility (at the cost of a more complex call syntax). See
|
||||
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
reqCnt := NewCounterVec(
|
||||
CounterOpts{
|
||||
Namespace: opts.Namespace,
|
||||
Subsystem: opts.Subsystem,
|
||||
Name: "requests_total",
|
||||
Help: "Total number of HTTP requests made.",
|
||||
ConstLabels: opts.ConstLabels,
|
||||
},
|
||||
instLabels,
|
||||
)
|
||||
|
||||
opts.Name = "request_duration_microseconds"
|
||||
opts.Help = "The HTTP request latencies in microseconds."
|
||||
reqDur := NewSummary(opts)
|
||||
|
||||
opts.Name = "request_size_bytes"
|
||||
opts.Help = "The HTTP request sizes in bytes."
|
||||
reqSz := NewSummary(opts)
|
||||
|
||||
opts.Name = "response_size_bytes"
|
||||
opts.Help = "The HTTP response sizes in bytes."
|
||||
resSz := NewSummary(opts)
|
||||
|
||||
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
|
||||
regReqDur := MustRegisterOrGet(reqDur).(Summary)
|
||||
regReqSz := MustRegisterOrGet(reqSz).(Summary)
|
||||
regResSz := MustRegisterOrGet(resSz).(Summary)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
|
||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
||||
out := make(chan int)
|
||||
urlLen := 0
|
||||
if r.URL != nil {
|
||||
urlLen = len(r.URL.String())
|
||||
}
|
||||
go computeApproximateRequestSize(r, out, urlLen)
|
||||
handlerFunc(delegate, r)
|
||||
|
||||
elapsed := float64(time.Since(now)) / float64(time.Microsecond)
|
||||
|
||||
method := sanitizeMethod(r.Method)
|
||||
code := sanitizeCode(delegate.status)
|
||||
regReqCnt.WithLabelValues(method, code).Inc()
|
||||
regReqDur.Observe(elapsed)
|
||||
regResSz.Observe(float64(delegate.written))
|
||||
regReqSz.Observe(float64(<-out))
|
||||
})
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
handler, method string
|
||||
status int
|
||||
written int
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.Write(b)
|
||||
r.written += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
func sanitizeMethod(m string) string {
|
||||
switch m {
|
||||
case "GET", "get":
|
||||
return "get"
|
||||
case "PUT", "put":
|
||||
return "put"
|
||||
case "HEAD", "head":
|
||||
return "head"
|
||||
case "POST", "post":
|
||||
return "post"
|
||||
case "DELETE", "delete":
|
||||
return "delete"
|
||||
case "CONNECT", "connect":
|
||||
return "connect"
|
||||
case "OPTIONS", "options":
|
||||
return "options"
|
||||
case "NOTIFY", "notify":
|
||||
return "notify"
|
||||
default:
|
||||
return strings.ToLower(m)
|
||||
}
|
||||
}
|
||||
|
||||
func sanitizeCode(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
|
||||
121
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http_test.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
type respBody string
|
||||
|
||||
func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusTeapot)
|
||||
w.Write([]byte(b))
|
||||
}
|
||||
|
||||
func TestInstrumentHandler(t *testing.T) {
|
||||
defer func(n nower) {
|
||||
now = n.(nower)
|
||||
}(now)
|
||||
|
||||
instant := time.Now()
|
||||
end := instant.Add(30 * time.Second)
|
||||
now = nowSeries(instant, end)
|
||||
respBody := respBody("Howdy there!")
|
||||
|
||||
hndlr := InstrumentHandler("test-handler", respBody)
|
||||
|
||||
opts := SummaryOpts{
|
||||
Subsystem: "http",
|
||||
ConstLabels: Labels{"handler": "test-handler"},
|
||||
}
|
||||
|
||||
reqCnt := MustRegisterOrGet(NewCounterVec(
|
||||
CounterOpts{
|
||||
Namespace: opts.Namespace,
|
||||
Subsystem: opts.Subsystem,
|
||||
Name: "requests_total",
|
||||
Help: "Total number of HTTP requests made.",
|
||||
ConstLabels: opts.ConstLabels,
|
||||
},
|
||||
instLabels,
|
||||
)).(*CounterVec)
|
||||
|
||||
opts.Name = "request_duration_microseconds"
|
||||
opts.Help = "The HTTP request latencies in microseconds."
|
||||
reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary)
|
||||
|
||||
opts.Name = "request_size_bytes"
|
||||
opts.Help = "The HTTP request sizes in bytes."
|
||||
MustRegisterOrGet(NewSummary(opts))
|
||||
|
||||
opts.Name = "response_size_bytes"
|
||||
opts.Help = "The HTTP response sizes in bytes."
|
||||
MustRegisterOrGet(NewSummary(opts))
|
||||
|
||||
reqCnt.Reset()
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
}
|
||||
|
||||
hndlr.ServeHTTP(resp, req)
|
||||
|
||||
if resp.Code != http.StatusTeapot {
|
||||
t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
|
||||
}
|
||||
if string(resp.Body.Bytes()) != "Howdy there!" {
|
||||
t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
|
||||
}
|
||||
|
||||
out := &dto.Metric{}
|
||||
reqDur.Write(out)
|
||||
if want, got := "test-handler", out.Label[0].GetValue(); want != got {
|
||||
t.Errorf("want label value %q in reqDur, got %q", want, got)
|
||||
}
|
||||
if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
|
||||
t.Errorf("want sample count %d in reqDur, got %d", want, got)
|
||||
}
|
||||
|
||||
out.Reset()
|
||||
if want, got := 1, len(reqCnt.children); want != got {
|
||||
t.Errorf("want %d children in reqCnt, got %d", want, got)
|
||||
}
|
||||
cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cnt.Write(out)
|
||||
if want, got := "418", out.Label[0].GetValue(); want != got {
|
||||
t.Errorf("want label value %q in reqCnt, got %q", want, got)
|
||||
}
|
||||
if want, got := "test-handler", out.Label[1].GetValue(); want != got {
|
||||
t.Errorf("want label value %q in reqCnt, got %q", want, got)
|
||||
}
|
||||
if want, got := "get", out.Label[2].GetValue(); want != got {
|
||||
t.Errorf("want label value %q in reqCnt, got %q", want, got)
|
||||
}
|
||||
if out.Counter == nil {
|
||||
t.Fatal("expected non-nil counter in reqCnt")
|
||||
}
|
||||
if want, got := 1., out.Counter.GetValue(); want != got {
|
||||
t.Errorf("want reqCnt of %f, got %f", want, got)
|
||||
}
|
||||
}
|
||||
164
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// A Metric models a single sample value with its meta data being exported to
|
||||
// Prometheus. Implementers of Metric in this package include Gauge, Counter,
|
||||
// Untyped, and Summary. Users can implement their own Metric types, but that
|
||||
// should be rarely needed. See the example for SelfCollector, which is also an
|
||||
// example for a user-implemented Metric.
|
||||
type Metric interface {
|
||||
// Desc returns the descriptor for the Metric. This method idempotently
|
||||
// returns the same descriptor throughout the lifetime of the
|
||||
// Metric. The returned descriptor is immutable by contract. A Metric
|
||||
// unable to describe itself must return an invalid descriptor (created
|
||||
// with NewInvalidDesc).
|
||||
Desc() *Desc
|
||||
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
||||
// transmission object.
|
||||
//
|
||||
// Implementers of custom Metric types must observe concurrency safety
|
||||
// as reads of this metric may occur at any time, and any blocking
|
||||
// occurs at the expense of total performance of rendering all
|
||||
// registered metrics. Ideally Metric implementations should support
|
||||
// concurrent readers.
|
||||
//
|
||||
// The Prometheus client library attempts to minimize memory allocations
|
||||
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
|
||||
// may recycle the dto.Metric proto message, so Metric implementations
|
||||
// should just populate the provided dto.Metric and then should not keep
|
||||
// any reference to it.
|
||||
//
|
||||
// While populating dto.Metric, labels must be sorted lexicographically.
|
||||
// (Implementers may find LabelPairSorter useful for that.)
|
||||
Write(*dto.Metric) error
|
||||
}
|
||||
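// Illustrative sketch, not part of the vendored file: a minimal user-defined
// Metric reporting a fixed untyped value. The descriptor arguments are made
// up, and NewDesc's signature (fqName, help, variableLabels, constLabels) is
// assumed from the rest of this package.
type constUntypedMetric struct {
	desc  *Desc
	value float64
}

func newConstUntypedMetric(value float64) *constUntypedMetric {
	return &constUntypedMetric{
		desc:  NewDesc("example_constant_value", "A constant example value.", nil, nil),
		value: value,
	}
}

func (m *constUntypedMetric) Desc() *Desc { return m.desc }

func (m *constUntypedMetric) Write(out *dto.Metric) error {
	v := m.value // copy so the proto message keeps no reference to the metric
	out.Untyped = &dto.Untyped{Value: &v}
	return nil
}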
|
||||
// Opts bundles the options for creating most Metric types. Each metric
|
||||
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises).
|
||||
//
|
||||
// It is mandatory to set Name and Help to a non-empty string. All other fields
|
||||
// are optional and can safely be left at their zero value.
|
||||
type Opts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Metric (created by joining these components with
|
||||
// "_"). Only Name is mandatory, the others merely help structuring the
|
||||
// name. Note that the fully-qualified name of the metric must be a
|
||||
// valid Prometheus metric name.
|
||||
Namespace string
|
||||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this metric. Mandatory!
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
Help string
|
||||
|
||||
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||
// with the same fully-qualified name must have the same label names in
|
||||
// their ConstLabels.
|
||||
//
|
||||
// Note that in most cases, labels have a value that varies during the
|
||||
// lifetime of a process. Those labels are usually managed with a metric
|
||||
// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
|
||||
// serve only special purposes. One is for the special case where the
|
||||
// value of a label does not change during the lifetime of a process,
|
||||
// e.g. if the revision of the running binary is put into a
|
||||
// label. Another, more advanced purpose is if more than one Collector
|
||||
// needs to collect Metrics with the same fully-qualified name. In that
|
||||
// case, those Metrics must differ in the values of their
|
||||
// ConstLabels. See the Collector examples.
|
||||
//
|
||||
// If the value of a label never changes (not even between binaries),
|
||||
// that label most likely should not be a label at all (but part of the
|
||||
// metric name).
|
||||
ConstLabels Labels
|
||||
}
|
||||
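// Illustrative sketch, not part of the vendored file: how the Opts fields are
// typically filled in through one of the per-type aliases, here GaugeOpts.
// Namespace, metric name and label values are made up.
var exampleInFlight = NewGauge(GaugeOpts{
	Namespace:   "myapp",
	Subsystem:   "http",
	Name:        "in_flight_requests",
	Help:        "Number of HTTP requests currently being served.",
	ConstLabels: Labels{"revision": "abc123"}, // fixed for the process lifetime
})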
|
||||
// BuildFQName joins the given three name components by "_". Empty name
|
||||
// components are ignored. If the name parameter itself is empty, an empty
|
||||
// string is returned, no matter what. Metric implementations included in this
|
||||
// library use this function internally to generate the fully-qualified metric
|
||||
// name from the name component in their Opts. Users of the library will only
|
||||
// need this function if they implement their own Metric or instantiate a Desc
|
||||
// (with NewDesc) directly.
|
||||
func BuildFQName(namespace, subsystem, name string) string {
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
switch {
|
||||
case namespace != "" && subsystem != "":
|
||||
return strings.Join([]string{namespace, subsystem, name}, "_")
|
||||
case namespace != "":
|
||||
return strings.Join([]string{namespace, name}, "_")
|
||||
case subsystem != "":
|
||||
return strings.Join([]string{subsystem, name}, "_")
|
||||
}
|
||||
return name
|
||||
}
|
||||
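// Illustrative sketch, not part of the vendored file: the joining behavior of
// BuildFQName for a few made-up inputs.
func buildFQNameExamples() {
	_ = BuildFQName("myapp", "http", "requests_total") // "myapp_http_requests_total"
	_ = BuildFQName("", "http", "requests_total")      // "http_requests_total"
	_ = BuildFQName("myapp", "http", "")               // "" (an empty name always yields "")
}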
|
||||
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||
// dto.LabelPair pointers. This is useful for implementing the Write method of
|
||||
// custom metrics.
|
||||
type LabelPairSorter []*dto.LabelPair
|
||||
|
||||
func (s LabelPairSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s LabelPairSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s LabelPairSorter) Less(i, j int) bool {
|
||||
return s[i].GetName() < s[j].GetName()
|
||||
}
|
||||
|
||||
type hashSorter []uint64
|
||||
|
||||
func (s hashSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s hashSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s hashSorter) Less(i, j int) bool {
|
||||
return s[i] < s[j]
|
||||
}
|
||||
|
||||
type invalidMetric struct {
|
||||
desc *Desc
|
||||
err error
|
||||
}
|
||||
|
||||
// NewInvalidMetric returns a metric whose Write method always returns the
|
||||
// provided error. It is useful if a Collector finds itself unable to collect
|
||||
// a metric and wishes to report an error to the registry.
|
||||
func NewInvalidMetric(desc *Desc, err error) Metric {
|
||||
return &invalidMetric{desc, err}
|
||||
}
|
||||
|
||||
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
||||
|
||||
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
||||
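// Illustrative sketch, not part of the vendored file: a Collector that reports
// a collection failure via NewInvalidMetric instead of silently dropping the
// metric. The gauge options and the sensor helper are made up.
type diskTempCollector struct {
	temperature Gauge
}

func newDiskTempCollector() *diskTempCollector {
	return &diskTempCollector{
		temperature: NewGauge(GaugeOpts{
			Name: "disk_temperature_celsius",
			Help: "Disk temperature reported by a made-up sensor.",
		}),
	}
}

func (c *diskTempCollector) Describe(ch chan<- *Desc) {
	ch <- c.temperature.Desc()
}

func (c *diskTempCollector) Collect(ch chan<- Metric) {
	temp, err := readSensor() // stand-in for a real data source
	if err != nil {
		ch <- NewInvalidMetric(c.temperature.Desc(), err)
		return
	}
	c.temperature.Set(temp)
	ch <- c.temperature
}

func readSensor() (float64, error) { return 42.0, nil } // stand-in data source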
35
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric_test.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestBuildFQName(t *testing.T) {
|
||||
scenarios := []struct{ namespace, subsystem, name, result string }{
|
||||
{"a", "b", "c", "a_b_c"},
|
||||
{"", "b", "c", "b_c"},
|
||||
{"a", "", "c", "a_c"},
|
||||
{"", "", "c", "c"},
|
||||
{"a", "b", "", ""},
|
||||
{"a", "", "", ""},
|
||||
{"", "b", "", ""},
|
||||
{" ", "", "", ""},
|
||||
}
|
||||
|
||||
for i, s := range scenarios {
|
||||
if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {
|
||||
t.Errorf("%d. want %s, got %s", i, want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
102
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
type processCollector struct {
|
||||
pid int
|
||||
collectFn func(chan<- Metric)
|
||||
pidFn func() (int, error)
|
||||
cpuTotal Counter
|
||||
openFDs, maxFDs Gauge
|
||||
vsize, rss Gauge
|
||||
startTime Gauge
|
||||
}
|
||||
|
||||
// NewProcessCollector returns a collector which exports the current state of
|
||||
// process metrics including cpu, memory and file descriptor usage as well as
|
||||
// the process start time for the given process id under the given namespace.
|
||||
func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||
return NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return pid, nil },
|
||||
namespace,
|
||||
)
|
||||
}
|
||||
|
||||
// NewProcessCollectorPIDFn returns a collector which exports the current state
|
||||
// of process metrics including cpu, memory and file descriptor usage as well
|
||||
// as the process start time under the given namespace. The given pidFn is
|
||||
// called on each collect and is used to determine the process to export
|
||||
// metrics for.
|
||||
func NewProcessCollectorPIDFn(
|
||||
pidFn func() (int, error),
|
||||
namespace string,
|
||||
) *processCollector {
|
||||
c := processCollector{
|
||||
pidFn: pidFn,
|
||||
collectFn: func(chan<- Metric) {},
|
||||
|
||||
cpuTotal: NewCounter(CounterOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_cpu_seconds_total",
|
||||
Help: "Total user and system CPU time spent in seconds.",
|
||||
}),
|
||||
openFDs: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_open_fds",
|
||||
Help: "Number of open file descriptors.",
|
||||
}),
|
||||
maxFDs: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_max_fds",
|
||||
Help: "Maximum number of open file descriptors.",
|
||||
}),
|
||||
vsize: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_virtual_memory_bytes",
|
||||
Help: "Virtual memory size in bytes.",
|
||||
}),
|
||||
rss: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_resident_memory_bytes",
|
||||
Help: "Resident memory size in bytes.",
|
||||
}),
|
||||
startTime: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_start_time_seconds",
|
||||
Help: "Start time of the process since unix epoch in seconds.",
|
||||
}),
|
||||
}
|
||||
|
||||
// Set up process metric collection if supported by the runtime.
|
||||
if processCollectSupported() {
|
||||
c.collectFn = c.processCollect
|
||||
}
|
||||
|
||||
return &c
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal.Desc()
|
||||
ch <- c.openFDs.Desc()
|
||||
ch <- c.maxFDs.Desc()
|
||||
ch <- c.vsize.Desc()
|
||||
ch <- c.rss.Desc()
|
||||
ch <- c.startTime.Desc()
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
func (c *processCollector) Collect(ch chan<- Metric) {
|
||||
c.collectFn(ch)
|
||||
}
|
||||
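// Illustrative sketch, not part of the vendored file: exporting process
// metrics for the current process. The "myapp" namespace is made up, and the
// standard library "os" package is assumed to be imported.
func registerProcessMetricsExample() {
	MustRegister(NewProcessCollector(os.Getpid(), "myapp"))
}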
84
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_procfs.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux plan9 solaris
|
||||
|
||||
package prometheus
|
||||
|
||||
import "github.com/prometheus/procfs"
|
||||
|
||||
func processCollectSupported() bool {
|
||||
if _, err := procfs.NewStat(); err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
pid, err := c.pidFn()
|
||||
if err != nil {
|
||||
c.reportCollectErrors(ch, err)
|
||||
return
|
||||
}
|
||||
|
||||
p, err := procfs.NewProc(pid)
|
||||
if err != nil {
|
||||
c.reportCollectErrors(ch, err)
|
||||
return
|
||||
}
|
||||
|
||||
if stat, err := p.NewStat(); err != nil {
|
||||
// Report collect errors for metrics depending on stat.
|
||||
ch <- NewInvalidMetric(c.vsize.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.rss.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.startTime.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.cpuTotal.Desc(), err)
|
||||
} else {
|
||||
c.cpuTotal.Set(stat.CPUTime())
|
||||
ch <- c.cpuTotal
|
||||
c.vsize.Set(float64(stat.VirtualMemory()))
|
||||
ch <- c.vsize
|
||||
c.rss.Set(float64(stat.ResidentMemory()))
|
||||
ch <- c.rss
|
||||
|
||||
if startTime, err := stat.StartTime(); err != nil {
|
||||
ch <- NewInvalidMetric(c.startTime.Desc(), err)
|
||||
} else {
|
||||
c.startTime.Set(startTime)
|
||||
ch <- c.startTime
|
||||
}
|
||||
}
|
||||
|
||||
if fds, err := p.FileDescriptorsLen(); err != nil {
|
||||
ch <- NewInvalidMetric(c.openFDs.Desc(), err)
|
||||
} else {
|
||||
c.openFDs.Set(float64(fds))
|
||||
ch <- c.openFDs
|
||||
}
|
||||
|
||||
if limits, err := p.NewLimits(); err != nil {
|
||||
ch <- NewInvalidMetric(c.maxFDs.Desc(), err)
|
||||
} else {
|
||||
c.maxFDs.Set(float64(limits.OpenFiles))
|
||||
ch <- c.maxFDs
|
||||
}
|
||||
}
|
||||
|
||||
func (c *processCollector) reportCollectErrors(ch chan<- Metric, err error) {
|
||||
ch <- NewInvalidMetric(c.cpuTotal.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.openFDs.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.maxFDs.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.vsize.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.rss.Desc(), err)
|
||||
ch <- NewInvalidMetric(c.startTime.Desc(), err)
|
||||
}
|
||||
24
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_rest.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !linux,!plan9,!solaris
|
||||
|
||||
package prometheus
|
||||
|
||||
func processCollectSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
panic("unreachable")
|
||||
}
|
||||
54
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/procfs"
|
||||
)
|
||||
|
||||
func TestProcessCollector(t *testing.T) {
|
||||
if _, err := procfs.Self(); err != nil {
|
||||
t.Skipf("skipping TestProcessCollector, procfs not available: %s", err)
|
||||
}
|
||||
|
||||
registry := newRegistry()
|
||||
registry.Register(NewProcessCollector(os.Getpid(), ""))
|
||||
registry.Register(NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return os.Getpid(), nil }, "foobar"))
|
||||
|
||||
s := httptest.NewServer(InstrumentHandler("prometheus", registry))
|
||||
defer s.Close()
|
||||
r, err := http.Get(s.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Body.Close()
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, re := range []*regexp.Regexp{
|
||||
regexp.MustCompile("process_cpu_seconds_total [0-9]"),
|
||||
regexp.MustCompile("process_max_fds [0-9]{2,}"),
|
||||
regexp.MustCompile("process_open_fds [1-9]"),
|
||||
regexp.MustCompile("process_virtual_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("process_resident_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"),
|
||||
regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"),
|
||||
regexp.MustCompile("foobar_process_max_fds [0-9]{2,}"),
|
||||
regexp.MustCompile("foobar_process_open_fds [1-9]"),
|
||||
regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"),
|
||||
} {
|
||||
if !re.Match(body) {
|
||||
t.Errorf("want body to match %s\n%s", re, body)
|
||||
}
|
||||
}
|
||||
}
|
||||
721
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
Normal file
@@ -0,0 +1,721 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Copyright (c) 2013, The Prometheus Authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
|
||||
"github.com/prometheus/client_golang/_vendor/goautoneg"
|
||||
"github.com/prometheus/client_golang/model"
|
||||
"github.com/prometheus/client_golang/text"
|
||||
)
|
||||
|
||||
var (
|
||||
defRegistry = newDefaultRegistry()
|
||||
errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
|
||||
)
|
||||
|
||||
// Constants relevant to the HTTP interface.
|
||||
const (
|
||||
// APIVersion is the version of the format of the exported data. This
|
||||
// will match this library's version, which subscribes to the Semantic
|
||||
// Versioning scheme.
|
||||
APIVersion = "0.0.4"
|
||||
|
||||
// DelimitedTelemetryContentType is the content type set on telemetry
|
||||
// data responses in delimited protobuf format.
|
||||
DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
|
||||
// TextTelemetryContentType is the content type set on telemetry data
|
||||
// responses in text format.
|
||||
TextTelemetryContentType = `text/plain; version=` + APIVersion
|
||||
// ProtoTextTelemetryContentType is the content type set on telemetry
|
||||
// data responses in protobuf text format. (Only used for debugging.)
|
||||
ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
|
||||
// ProtoCompactTextTelemetryContentType is the content type set on
|
||||
// telemetry data responses in protobuf compact text format. (Only used
|
||||
// for debugging.)
|
||||
ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
|
||||
|
||||
// Constants for object pools.
|
||||
numBufs = 4
|
||||
numMetricFamilies = 1000
|
||||
numMetrics = 10000
|
||||
|
||||
// Capacity for the channel to collect metrics and descriptors.
|
||||
capMetricChan = 1000
|
||||
capDescChan = 10
|
||||
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
acceptHeader = "Accept"
|
||||
)
|
||||
|
||||
// Handler returns the HTTP handler for the global Prometheus registry. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name). Usually the handler is used to handle the "/metrics" endpoint.
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", defRegistry)
|
||||
}
|
||||
|
||||
// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
|
||||
// handler is not instrumented. This is useful if no instrumentation is desired
|
||||
// (for whatever reason) or if the instrumentation has to happen with a
|
||||
// different handler name (or with a different instrumentation approach
|
||||
// altogether). See the InstrumentHandler example.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return defRegistry
|
||||
}
|
||||
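// Illustrative sketch, not part of the vendored file: exposing the default
// registry on the conventional /metrics endpoint. The listen address is made
// up, and the standard library "log" package is assumed to be imported.
func serveMetricsExample() {
	http.Handle("/metrics", Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}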
|
||||
// Register registers a new Collector to be included in metrics collection. It
|
||||
// returns an error if the descriptors provided by the Collector are invalid or
|
||||
// if they - in combination with descriptors of already registered Collectors -
|
||||
// do not fulfill the consistency and uniqueness criteria described in the Desc
|
||||
// documentation.
|
||||
//
|
||||
// Do not register the same Collector multiple times concurrently. (Registering
|
||||
// the same Collector twice would result in an error anyway, but on top of that,
|
||||
// it is not safe to do so concurrently.)
|
||||
func Register(m Collector) error {
|
||||
_, err := defRegistry.Register(m)
|
||||
return err
|
||||
}
|
||||
|
||||
// MustRegister works like Register but panics where Register would have
|
||||
// returned an error.
|
||||
func MustRegister(m Collector) {
|
||||
err := Register(m)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
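// Illustrative sketch, not part of the vendored file: the common pattern of
// creating and registering a metric at program initialization time. The
// metric name is made up.
var exampleOpsTotal = NewCounter(CounterOpts{
	Name: "myapp_ops_total",
	Help: "Total number of operations handled.",
})

func registerExampleOps() {
	MustRegister(exampleOpsTotal)
}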
|
||||
// RegisterOrGet works like Register but does not return an error if a Collector
|
||||
// is registered that equals a previously registered Collector. (Two Collectors
|
||||
// are considered equal if their Describe method yields the same set of
|
||||
// descriptors.) Instead, the previously registered Collector is returned (which
|
||||
// is helpful if the new and previously registered Collectors are equal but not
|
||||
// identical, i.e. not pointers to the same object).
|
||||
//
|
||||
// As for Register, it is still not safe to call RegisterOrGet with the same
|
||||
// Collector multiple times concurrently.
|
||||
func RegisterOrGet(m Collector) (Collector, error) {
|
||||
return defRegistry.RegisterOrGet(m)
|
||||
}
|
||||
|
||||
// MustRegisterOrGet works like Register but panics where RegisterOrGet would
|
||||
// have returned an error.
|
||||
func MustRegisterOrGet(m Collector) Collector {
|
||||
existing, err := RegisterOrGet(m)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return existing
|
||||
}
|
||||
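// Illustrative sketch, not part of the vendored file: obtaining a summary that
// may already have been registered by another part of the program. The metric
// name is made up.
func requestLatencySummary() Summary {
	return MustRegisterOrGet(NewSummary(SummaryOpts{
		Name: "myapp_request_duration_microseconds",
		Help: "Request latency in microseconds.",
	})).(Summary)
}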
|
||||
// Unregister unregisters the Collector that equals the Collector passed in as
|
||||
// an argument. (Two Collectors are considered equal if their Describe method
|
||||
// yields the same set of descriptors.) The function returns whether a Collector
|
||||
// was unregistered.
|
||||
func Unregister(c Collector) bool {
|
||||
return defRegistry.Unregister(c)
|
||||
}
|
||||
|
||||
// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
|
||||
// are collected. The hook function must be set before metrics collection begins
|
||||
// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
|
||||
// MetricFamily protobufs returned by the hook function are added to the
|
||||
// delivered metrics. Each returned MetricFamily must have a unique name (also
|
||||
// taking into account the MetricFamilies created in the regular way).
|
||||
//
|
||||
// This is a way to directly inject MetricFamily protobufs managed and owned by
|
||||
// the caller. The caller has full responsibility. No sanity checks are
|
||||
// performed on the returned protobufs (besides the name checks described
|
||||
// above). The function must be callable at any time and concurrently.
|
||||
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
|
||||
defRegistry.metricFamilyInjectionHook = hook
|
||||
}
|
||||
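// Illustrative sketch, not part of the vendored file: injecting a MetricFamily
// that is produced and owned outside this library. The metric name and value
// are made up; dto and proto are the packages already imported by this file.
func installInjectionHookExample() {
	SetMetricFamilyInjectionHook(func() []*dto.MetricFamily {
		return []*dto.MetricFamily{
			{
				Name: proto.String("external_example_total"),
				Help: proto.String("An externally managed example counter."),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{
					{Counter: &dto.Counter{Value: proto.Float64(1)}},
				},
			},
		}
	})
}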
|
||||
// PanicOnCollectError sets the behavior whether a panic is caused upon an error
|
||||
// while metrics are collected and served to the http endpoint. By default, an
|
||||
// internal server error (status code 500) is served with an error message.
|
||||
func PanicOnCollectError(b bool) {
|
||||
defRegistry.panicOnCollectError = b
|
||||
}
|
||||
|
||||
// EnableCollectChecks enables (or disables) additional consistency checks
|
||||
// during metrics collection. These additional checks are not enabled by default
|
||||
// because they inflict a performance penalty and the errors they check for can
|
||||
// only happen if the used Metric and Collector types have internal programming
|
||||
// errors. It can be helpful to enable these checks while working with custom
|
||||
// Collectors or Metrics whose correctness is not well established yet.
|
||||
func EnableCollectChecks(b bool) {
|
||||
defRegistry.collectChecksEnabled = b
|
||||
}
|
||||
|
||||
// Push triggers a metric collection and pushes all collected metrics to the
|
||||
// Pushgateway specified by addr. See the Pushgateway documentation for detailed
|
||||
// implications of the job and instance parameter. instance can be left
|
||||
// empty. The Pushgateway will then use the client's IP number instead. Use just
|
||||
// host:port or ip:port as addr. (Don't add 'http://' or any path.)
|
||||
//
|
||||
// Note that all previously pushed metrics with the same job and instance will
|
||||
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
|
||||
// to push to the Pushgateway.)
|
||||
func Push(job, instance, addr string) error {
|
||||
return defRegistry.Push(job, instance, addr, "PUT")
|
||||
}
|
||||
|
||||
// PushAdd works like Push, but only previously pushed metrics with the same
|
||||
// name (and the same job and instance) will be replaced. (It uses HTTP method
|
||||
// 'POST' to push to the Pushgateway.)
|
||||
func PushAdd(job, instance, addr string) error {
|
||||
return defRegistry.Push(job, instance, addr, "POST")
|
||||
}
|
||||
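// Illustrative sketch, not part of the vendored file: pushing the metrics of a
// short-lived batch job to a Pushgateway. Job name, instance and address are
// made up, and the standard library "log" package is assumed to be imported.
func pushExample() {
	if err := Push("nightly_batch", "instance-1", "pushgateway.example.org:9091"); err != nil {
		log.Println("failed to push metrics:", err)
	}
}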
|
||||
// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
|
||||
// certain encoding. It returns the number of bytes written and any error
|
||||
// encountered. Note that ext.WriteDelimited and text.MetricFamilyToText are
|
||||
// encoders.
|
||||
type encoder func(io.Writer, *dto.MetricFamily) (int, error)
|
||||
|
||||
type registry struct {
|
||||
mtx sync.RWMutex
|
||||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
||||
descIDs map[uint64]struct{}
|
||||
dimHashesByName map[string]uint64
|
||||
bufPool chan *bytes.Buffer
|
||||
metricFamilyPool chan *dto.MetricFamily
|
||||
metricPool chan *dto.Metric
|
||||
metricFamilyInjectionHook func() []*dto.MetricFamily
|
||||
|
||||
panicOnCollectError, collectChecksEnabled bool
|
||||
}
|
||||
|
||||
func (r *registry) Register(c Collector) (Collector, error) {
|
||||
descChan := make(chan *Desc, capDescChan)
|
||||
go func() {
|
||||
c.Describe(descChan)
|
||||
close(descChan)
|
||||
}()
|
||||
|
||||
newDescIDs := map[uint64]struct{}{}
|
||||
newDimHashesByName := map[string]uint64{}
|
||||
var collectorID uint64 // Just a sum of all desc IDs.
|
||||
var duplicateDescErr error
|
||||
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
// Conduct various tests...
|
||||
for desc := range descChan {
|
||||
|
||||
// Is the descriptor valid at all?
|
||||
if desc.err != nil {
|
||||
return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
|
||||
}
|
||||
|
||||
// Is the descID unique?
|
||||
// (In other words: Is the fqName + constLabel combination unique?)
|
||||
if _, exists := r.descIDs[desc.id]; exists {
|
||||
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
|
||||
}
|
||||
// If it is not a duplicate desc in this collector, add it to
|
||||
// the collectorID. (We allow duplicate descs within the same
|
||||
// collector, but their existence must be a no-op.)
|
||||
if _, exists := newDescIDs[desc.id]; !exists {
|
||||
newDescIDs[desc.id] = struct{}{}
|
||||
collectorID += desc.id
|
||||
}
|
||||
|
||||
// Are all the label names and the help string consistent with
|
||||
// previous descriptors of the same name?
|
||||
// First check existing descriptors...
|
||||
if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
|
||||
if dimHash != desc.dimHash {
|
||||
return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
|
||||
}
|
||||
} else {
|
||||
// ...then check the new descriptors already seen.
|
||||
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
|
||||
if dimHash != desc.dimHash {
|
||||
return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
|
||||
}
|
||||
} else {
|
||||
newDimHashesByName[desc.fqName] = desc.dimHash
|
||||
}
|
||||
}
|
||||
}
|
||||
// Did anything happen at all?
|
||||
if len(newDescIDs) == 0 {
|
||||
return nil, errors.New("collector has no descriptors")
|
||||
}
|
||||
if existing, exists := r.collectorsByID[collectorID]; exists {
|
||||
return existing, errAlreadyReg
|
||||
}
|
||||
// If the collectorID is new, but at least one of the descs existed
|
||||
// before, we are in trouble.
|
||||
if duplicateDescErr != nil {
|
||||
return nil, duplicateDescErr
|
||||
}
|
||||
|
||||
// Only after all tests have passed, actually register.
|
||||
r.collectorsByID[collectorID] = c
|
||||
for hash := range newDescIDs {
|
||||
r.descIDs[hash] = struct{}{}
|
||||
}
|
||||
for name, dimHash := range newDimHashesByName {
|
||||
r.dimHashesByName[name] = dimHash
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
|
||||
existing, err := r.Register(m)
|
||||
if err != nil && err != errAlreadyReg {
|
||||
return nil, err
|
||||
}
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
func (r *registry) Unregister(c Collector) bool {
|
||||
descChan := make(chan *Desc, capDescChan)
|
||||
go func() {
|
||||
c.Describe(descChan)
|
||||
close(descChan)
|
||||
}()
|
||||
|
||||
descIDs := map[uint64]struct{}{}
|
||||
var collectorID uint64 // Just a sum of the desc IDs.
|
||||
for desc := range descChan {
|
||||
if _, exists := descIDs[desc.id]; !exists {
|
||||
collectorID += desc.id
|
||||
descIDs[desc.id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
r.mtx.RLock()
|
||||
if _, exists := r.collectorsByID[collectorID]; !exists {
|
||||
r.mtx.RUnlock()
|
||||
return false
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
delete(r.collectorsByID, collectorID)
|
||||
for id := range descIDs {
|
||||
delete(r.descIDs, id)
|
||||
}
|
||||
// dimHashesByName is left untouched as those must be consistent
|
||||
// throughout the lifetime of a program.
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *registry) Push(job, instance, addr, method string) error {
|
||||
u := fmt.Sprintf("http://%s/metrics/jobs/%s", addr, url.QueryEscape(job))
|
||||
if instance != "" {
|
||||
u += "/instances/" + url.QueryEscape(instance)
|
||||
}
|
||||
buf := r.getBuf()
|
||||
defer r.giveBuf(buf)
|
||||
if _, err := r.writePB(buf, text.WriteProtoDelimited); err != nil {
|
||||
if r.panicOnCollectError {
|
||||
panic(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(method, u, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 202 {
|
||||
return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, u)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
enc, contentType := chooseEncoder(req)
|
||||
buf := r.getBuf()
|
||||
defer r.giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf)
|
||||
if _, err := r.writePB(writer, enc); err != nil {
|
||||
if r.panicOnCollectError {
|
||||
panic(err)
|
||||
}
|
||||
http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, contentType)
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) {
|
||||
var metricHashes map[uint64]struct{}
|
||||
if r.collectChecksEnabled {
|
||||
metricHashes = make(map[uint64]struct{})
|
||||
}
|
||||
metricChan := make(chan Metric, capMetricChan)
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
r.mtx.RLock()
|
||||
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
||||
|
||||
// Scatter.
|
||||
// (Collectors could be complex and slow, so we call them all at once.)
|
||||
wg.Add(len(r.collectorsByID))
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(metricChan)
|
||||
}()
|
||||
for _, collector := range r.collectorsByID {
|
||||
go func(collector Collector) {
|
||||
defer wg.Done()
|
||||
collector.Collect(metricChan)
|
||||
}(collector)
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
|
||||
// Drain metricChan in case of premature return.
|
||||
defer func() {
|
||||
for _ = range metricChan {
|
||||
}
|
||||
}()
|
||||
|
||||
// Gather.
|
||||
for metric := range metricChan {
|
||||
// This could be done concurrently, too, but it would require locking
|
||||
// of metricFamiliesByName (and of metricHashes if checks are
|
||||
// enabled). Most likely not worth it.
|
||||
desc := metric.Desc()
|
||||
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
||||
if !ok {
|
||||
metricFamily = r.getMetricFamily()
|
||||
defer r.giveMetricFamily(metricFamily)
|
||||
metricFamily.Name = proto.String(desc.fqName)
|
||||
metricFamily.Help = proto.String(desc.help)
|
||||
metricFamiliesByName[desc.fqName] = metricFamily
|
||||
}
|
||||
dtoMetric := r.getMetric()
|
||||
defer r.giveMetric(dtoMetric)
|
||||
if err := metric.Write(dtoMetric); err != nil {
|
||||
// TODO: Consider different means of error reporting so
|
||||
// that a single erroneous metric could be skipped
|
||||
// instead of blowing up the whole collection.
|
||||
return 0, fmt.Errorf("error collecting metric %v: %s", desc, err)
|
||||
}
|
||||
switch {
|
||||
case metricFamily.Type != nil:
|
||||
// Type already set. We are good.
|
||||
case dtoMetric.Gauge != nil:
|
||||
metricFamily.Type = dto.MetricType_GAUGE.Enum()
|
||||
case dtoMetric.Counter != nil:
|
||||
metricFamily.Type = dto.MetricType_COUNTER.Enum()
|
||||
case dtoMetric.Summary != nil:
|
||||
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
|
||||
case dtoMetric.Untyped != nil:
|
||||
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
|
||||
default:
|
||||
return 0, fmt.Errorf("empty metric collected: %s", dtoMetric)
|
||||
}
|
||||
if r.collectChecksEnabled {
|
||||
if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
||||
}
|
||||
|
||||
if r.metricFamilyInjectionHook != nil {
|
||||
for _, mf := range r.metricFamilyInjectionHook() {
|
||||
if _, exists := metricFamiliesByName[mf.GetName()]; exists {
|
||||
return 0, fmt.Errorf("metric family with duplicate name injected: %s", mf)
|
||||
}
|
||||
metricFamiliesByName[mf.GetName()] = mf
|
||||
}
|
||||
}
|
||||
|
||||
// Now that MetricFamilies are all set, sort their Metrics
|
||||
// lexicographically by their label values.
|
||||
for _, mf := range metricFamiliesByName {
|
||||
sort.Sort(metricSorter(mf.Metric))
|
||||
}
|
||||
|
||||
// Write out MetricFamilies sorted by their name.
|
||||
names := make([]string, 0, len(metricFamiliesByName))
|
||||
for name := range metricFamiliesByName {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
var written int
|
||||
for _, name := range names {
|
||||
w, err := writeEncoded(w, metricFamiliesByName[name])
|
||||
written += w
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
|
||||
|
||||
// Type consistency with metric family.
|
||||
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q is not a %s",
|
||||
dtoMetric, metricFamily.Type,
|
||||
)
|
||||
}
|
||||
|
||||
// Desc consistency with metric family.
|
||||
if metricFamily.GetHelp() != desc.help {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q has help %q but should have %q",
|
||||
dtoMetric, desc.help, metricFamily.GetHelp(),
|
||||
)
|
||||
}
|
||||
|
||||
// Is the desc consistent with the content of the metric?
|
||||
lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
|
||||
lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
|
||||
for _, l := range desc.variableLabels {
|
||||
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
|
||||
Name: proto.String(l),
|
||||
})
|
||||
}
|
||||
if len(lpsFromDesc) != len(dtoMetric.Label) {
|
||||
return fmt.Errorf(
|
||||
"labels in collected metric %q are inconsistent with descriptor %s",
|
||||
dtoMetric, desc,
|
||||
)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(lpsFromDesc))
|
||||
for i, lpFromDesc := range lpsFromDesc {
|
||||
lpFromMetric := dtoMetric.Label[i]
|
||||
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
|
||||
lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
|
||||
return fmt.Errorf(
|
||||
"labels in collected metric %q are inconsistent with descriptor %s",
|
||||
dtoMetric, desc,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
|
||||
h := fnv.New64a()
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(desc.fqName)
|
||||
buf.WriteByte(model.SeparatorByte)
|
||||
h.Write(buf.Bytes())
|
||||
for _, lp := range dtoMetric.Label {
|
||||
buf.Reset()
|
||||
buf.WriteString(lp.GetValue())
|
||||
buf.WriteByte(model.SeparatorByte)
|
||||
h.Write(buf.Bytes())
|
||||
}
|
||||
metricHash := h.Sum64()
|
||||
if _, exists := metricHashes[metricHash]; exists {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q was collected before with the same name and label values",
|
||||
dtoMetric,
|
||||
)
|
||||
}
|
||||
metricHashes[metricHash] = struct{}{}
|
||||
|
||||
r.mtx.RLock() // Remaining checks need the read lock.
|
||||
defer r.mtx.RUnlock()
|
||||
|
||||
// Is the desc registered?
|
||||
if _, exist := r.descIDs[desc.id]; !exist {
|
||||
return fmt.Errorf("collected metric %q with unregistered descriptor %s", dtoMetric, desc)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *registry) getBuf() *bytes.Buffer {
|
||||
select {
|
||||
case buf := <-r.bufPool:
|
||||
return buf
|
||||
default:
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *registry) giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
select {
|
||||
case r.bufPool <- buf:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (r *registry) getMetricFamily() *dto.MetricFamily {
|
||||
select {
|
||||
case mf := <-r.metricFamilyPool:
|
||||
return mf
|
||||
default:
|
||||
return &dto.MetricFamily{}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
|
||||
mf.Reset()
|
||||
select {
|
||||
case r.metricFamilyPool <- mf:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (r *registry) getMetric() *dto.Metric {
|
||||
select {
|
||||
case m := <-r.metricPool:
|
||||
return m
|
||||
default:
|
||||
return &dto.Metric{}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *registry) giveMetric(m *dto.Metric) {
|
||||
m.Reset()
|
||||
select {
|
||||
case r.metricPool <- m:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func newRegistry() *registry {
|
||||
return &registry{
|
||||
collectorsByID: map[uint64]Collector{},
|
||||
descIDs: map[uint64]struct{}{},
|
||||
dimHashesByName: map[string]uint64{},
|
||||
bufPool: make(chan *bytes.Buffer, numBufs),
|
||||
metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
|
||||
metricPool: make(chan *dto.Metric, numMetrics),
|
||||
}
|
||||
}
|
||||
|
||||
func newDefaultRegistry() *registry {
|
||||
r := newRegistry()
|
||||
r.Register(NewProcessCollector(os.Getpid(), ""))
|
||||
r.Register(NewGoCollector())
|
||||
return r
|
||||
}
|
||||
|
||||
func chooseEncoder(req *http.Request) (encoder, string) {
|
||||
accepts := goautoneg.ParseAccept(req.Header.Get(acceptHeader))
|
||||
for _, accept := range accepts {
|
||||
switch {
|
||||
case accept.Type == "application" &&
|
||||
accept.SubType == "vnd.google.protobuf" &&
|
||||
accept.Params["proto"] == "io.prometheus.client.MetricFamily":
|
||||
switch accept.Params["encoding"] {
|
||||
case "delimited":
|
||||
return text.WriteProtoDelimited, DelimitedTelemetryContentType
|
||||
case "text":
|
||||
return text.WriteProtoText, ProtoTextTelemetryContentType
|
||||
case "compact-text":
|
||||
return text.WriteProtoCompactText, ProtoCompactTextTelemetryContentType
|
||||
default:
|
||||
continue
|
||||
}
|
||||
case accept.Type == "text" &&
|
||||
accept.SubType == "plain" &&
|
||||
(accept.Params["version"] == "0.0.4" || accept.Params["version"] == ""):
|
||||
return text.MetricFamilyToText, TextTelemetryContentType
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return text.MetricFamilyToText, TextTelemetryContentType
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
||||
|
||||
type metricSorter []*dto.Metric
|
||||
|
||||
func (s metricSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s metricSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s metricSorter) Less(i, j int) bool {
|
||||
for n, lp := range s[i].Label {
|
||||
vi := lp.GetValue()
|
||||
vj := s[j].Label[n].GetValue()
|
||||
if vi != vj {
|
||||
return vi < vj
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
489
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go
generated
vendored
Normal file
@@ -0,0 +1,489 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Copyright (c) 2013, The Prometheus Authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
type fakeResponseWriter struct {
|
||||
header http.Header
|
||||
body bytes.Buffer
|
||||
}
|
||||
|
||||
func (r *fakeResponseWriter) Header() http.Header {
|
||||
return r.header
|
||||
}
|
||||
|
||||
func (r *fakeResponseWriter) Write(d []byte) (l int, err error) {
|
||||
return r.body.Write(d)
|
||||
}
|
||||
|
||||
func (r *fakeResponseWriter) WriteHeader(c int) {
|
||||
}
|
||||
|
||||
func testHandler(t testing.TB) {
|
||||
|
||||
metricVec := NewCounterVec(
|
||||
CounterOpts{
|
||||
Name: "name",
|
||||
Help: "docstring",
|
||||
ConstLabels: Labels{"constname": "constvalue"},
|
||||
},
|
||||
[]string{"labelname"},
|
||||
)
|
||||
|
||||
metricVec.WithLabelValues("val1").Inc()
|
||||
metricVec.WithLabelValues("val2").Inc()
|
||||
|
||||
varintBuf := make([]byte, binary.MaxVarintLen32)
|
||||
|
||||
externalMetricFamily := []*dto.MetricFamily{
|
||||
{
|
||||
Name: proto.String("externalname"),
|
||||
Help: proto.String("externaldocstring"),
|
||||
Type: dto.MetricType_COUNTER.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("externallabelname"),
|
||||
Value: proto.String("externalval1"),
|
||||
},
|
||||
{
|
||||
Name: proto.String("externalconstname"),
|
||||
Value: proto.String("externalconstvalue"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var externalBuf bytes.Buffer
|
||||
l := binary.PutUvarint(varintBuf, uint64(len(marshaledExternalMetricFamily)))
|
||||
_, err = externalBuf.Write(varintBuf[:l])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = externalBuf.Write(marshaledExternalMetricFamily)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
externalMetricFamilyAsBytes := externalBuf.Bytes()
|
||||
externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring
|
||||
# TYPE externalname counter
|
||||
externalname{externallabelname="externalval1",externalconstname="externalconstvalue"} 1
|
||||
`)
|
||||
externalMetricFamilyAsProtoText := []byte(`name: "externalname"
|
||||
help: "externaldocstring"
|
||||
type: COUNTER
|
||||
metric: <
|
||||
label: <
|
||||
name: "externallabelname"
|
||||
value: "externalval1"
|
||||
>
|
||||
label: <
|
||||
name: "externalconstname"
|
||||
value: "externalconstvalue"
|
||||
>
|
||||
counter: <
|
||||
value: 1
|
||||
>
|
||||
>
|
||||
|
||||
`)
|
||||
externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externallabelname" value:"externalval1" > label:<name:"externalconstname" value:"externalconstvalue" > counter:<value:1 > >
|
||||
`)
|
||||
|
||||
expectedMetricFamily := &dto.MetricFamily{
|
||||
Name: proto.String("name"),
|
||||
Help: proto.String("docstring"),
|
||||
Type: dto.MetricType_COUNTER.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("constname"),
|
||||
Value: proto.String("constvalue"),
|
||||
},
|
||||
{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(1),
|
||||
},
|
||||
},
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("constname"),
|
||||
Value: proto.String("constvalue"),
|
||||
},
|
||||
{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
marshaledExpectedMetricFamily, err := proto.Marshal(expectedMetricFamily)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
l = binary.PutUvarint(varintBuf, uint64(len(marshaledExpectedMetricFamily)))
|
||||
_, err = buf.Write(varintBuf[:l])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = buf.Write(marshaledExpectedMetricFamily)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedMetricFamilyAsBytes := buf.Bytes()
|
||||
expectedMetricFamilyAsText := []byte(`# HELP name docstring
|
||||
# TYPE name counter
|
||||
name{constname="constvalue",labelname="val1"} 1
|
||||
name{constname="constvalue",labelname="val2"} 1
|
||||
`)
|
||||
expectedMetricFamilyAsProtoText := []byte(`name: "name"
|
||||
help: "docstring"
|
||||
type: COUNTER
|
||||
metric: <
|
||||
label: <
|
||||
name: "constname"
|
||||
value: "constvalue"
|
||||
>
|
||||
label: <
|
||||
name: "labelname"
|
||||
value: "val1"
|
||||
>
|
||||
counter: <
|
||||
value: 1
|
||||
>
|
||||
>
|
||||
metric: <
|
||||
label: <
|
||||
name: "constname"
|
||||
value: "constvalue"
|
||||
>
|
||||
label: <
|
||||
name: "labelname"
|
||||
value: "val2"
|
||||
>
|
||||
counter: <
|
||||
value: 1
|
||||
>
|
||||
>
|
||||
|
||||
`)
|
||||
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
|
||||
`)
|
||||
|
||||
type output struct {
|
||||
headers map[string]string
|
||||
body []byte
|
||||
}
|
||||
|
||||
var scenarios = []struct {
|
||||
headers map[string]string
|
||||
out output
|
||||
withCounter bool
|
||||
withExternalMF bool
|
||||
}{
|
||||
{ // 0
|
||||
headers: map[string]string{
|
||||
"Accept": "foo/bar;q=0.2, dings/bums;q=0.8",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: []byte{},
|
||||
},
|
||||
},
|
||||
{ // 1
|
||||
headers: map[string]string{
|
||||
"Accept": "foo/bar;q=0.2, application/quark;q=0.8",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: []byte{},
|
||||
},
|
||||
},
|
||||
{ // 2
|
||||
headers: map[string]string{
|
||||
"Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: []byte{},
|
||||
},
|
||||
},
|
||||
{ // 3
|
||||
headers: map[string]string{
|
||||
"Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
||||
},
|
||||
body: []byte{},
|
||||
},
|
||||
},
|
||||
{ // 4
|
||||
headers: map[string]string{
|
||||
"Accept": "application/json",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: expectedMetricFamilyAsText,
|
||||
},
|
||||
withCounter: true,
|
||||
},
|
||||
{ // 5
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
||||
},
|
||||
body: expectedMetricFamilyAsBytes,
|
||||
},
|
||||
withCounter: true,
|
||||
},
|
||||
{ // 6
|
||||
headers: map[string]string{
|
||||
"Accept": "application/json",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: externalMetricFamilyAsText,
|
||||
},
|
||||
withExternalMF: true,
|
||||
},
|
||||
{ // 7
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
||||
},
|
||||
body: externalMetricFamilyAsBytes,
|
||||
},
|
||||
withExternalMF: true,
|
||||
},
|
||||
{ // 8
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
||||
},
|
||||
body: bytes.Join(
|
||||
[][]byte{
|
||||
externalMetricFamilyAsBytes,
|
||||
expectedMetricFamilyAsBytes,
|
||||
},
|
||||
[]byte{},
|
||||
),
|
||||
},
|
||||
withCounter: true,
|
||||
withExternalMF: true,
|
||||
},
|
||||
{ // 9
|
||||
headers: map[string]string{
|
||||
"Accept": "text/plain",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: []byte{},
|
||||
},
|
||||
},
|
||||
{ // 10
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: expectedMetricFamilyAsText,
|
||||
},
|
||||
withCounter: true,
|
||||
},
|
||||
{ // 11
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `text/plain; version=0.0.4`,
|
||||
},
|
||||
body: bytes.Join(
|
||||
[][]byte{
|
||||
externalMetricFamilyAsText,
|
||||
expectedMetricFamilyAsText,
|
||||
},
|
||||
[]byte{},
|
||||
),
|
||||
},
|
||||
withCounter: true,
|
||||
withExternalMF: true,
|
||||
},
|
||||
{ // 12
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
||||
},
|
||||
body: bytes.Join(
|
||||
[][]byte{
|
||||
externalMetricFamilyAsBytes,
|
||||
expectedMetricFamilyAsBytes,
|
||||
},
|
||||
[]byte{},
|
||||
),
|
||||
},
|
||||
withCounter: true,
|
||||
withExternalMF: true,
|
||||
},
|
||||
{ // 13
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`,
|
||||
},
|
||||
body: bytes.Join(
|
||||
[][]byte{
|
||||
externalMetricFamilyAsProtoText,
|
||||
expectedMetricFamilyAsProtoText,
|
||||
},
|
||||
[]byte{},
|
||||
),
|
||||
},
|
||||
withCounter: true,
|
||||
withExternalMF: true,
|
||||
},
|
||||
{ // 14
|
||||
headers: map[string]string{
|
||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
|
||||
},
|
||||
body: bytes.Join(
|
||||
[][]byte{
|
||||
externalMetricFamilyAsProtoCompactText,
|
||||
expectedMetricFamilyAsProtoCompactText,
|
||||
},
|
||||
[]byte{},
|
||||
),
|
||||
},
|
||||
withCounter: true,
|
||||
withExternalMF: true,
|
||||
},
|
||||
}
|
||||
for i, scenario := range scenarios {
|
||||
registry := newRegistry()
|
||||
registry.collectChecksEnabled = true
|
||||
|
||||
if scenario.withCounter {
|
||||
registry.Register(metricVec)
|
||||
}
|
||||
if scenario.withExternalMF {
|
||||
registry.metricFamilyInjectionHook = func() []*dto.MetricFamily {
|
||||
return externalMetricFamily
|
||||
}
|
||||
}
|
||||
writer := &fakeResponseWriter{
|
||||
header: http.Header{},
|
||||
}
|
||||
handler := InstrumentHandler("prometheus", registry)
|
||||
request, _ := http.NewRequest("GET", "/", nil)
|
||||
for key, value := range scenario.headers {
|
||||
request.Header.Add(key, value)
|
||||
}
|
||||
handler(writer, request)
|
||||
|
||||
for key, value := range scenario.out.headers {
|
||||
if writer.Header().Get(key) != value {
|
||||
t.Errorf(
|
||||
"%d. expected %q for header %q, got %q",
|
||||
i, value, key, writer.Header().Get(key),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if !bytes.Equal(scenario.out.body, writer.body.Bytes()) {
|
||||
t.Errorf(
|
||||
"%d. expected %q for body, got %q",
|
||||
i, scenario.out.body, writer.body.Bytes(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandler(t *testing.T) {
|
||||
testHandler(t)
|
||||
}
|
||||
|
||||
func BenchmarkHandler(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testHandler(b)
|
||||
}
|
||||
}
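// The scenario table above exercises content negotiation via the Accept
// header. As a hedged illustration (editor's sketch, not part of the original
// test file), a scraper that prefers the delimited protobuf format over plain
// text could drive the handler like this; the helper name and the "/metrics"
// path are assumptions made for the example.
func exampleNegotiatedRequest(handler http.HandlerFunc) *fakeResponseWriter {
	writer := &fakeResponseWriter{header: http.Header{}}
	request, _ := http.NewRequest("GET", "/metrics", nil)
	request.Header.Add(
		"Accept",
		"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7, text/plain;q=0.3",
	)
	// The response body is then a sequence of varint-length-delimited
	// MetricFamily protobuf messages, as asserted in the scenarios above.
	handler(writer, request)
	return writer
}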
|
||||
424
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
Normal file
@@ -0,0 +1,424 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/_vendor/perks/quantile"
|
||||
)
|
||||
|
||||
// A Summary captures individual observations from an event or sample stream and
|
||||
// summarizes them in a manner similar to traditional summary statistics: 1. sum
|
||||
// of observations, 2. observation count, 3. rank estimations.
|
||||
//
|
||||
// A typical use-case is the observation of request latencies. By default, a
|
||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||
// as rank estimations.
|
||||
//
|
||||
// To create Summary instances, use NewSummary.
|
||||
type Summary interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Observe adds a single observation to the summary.
|
||||
Observe(float64)
|
||||
}
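// A hedged usage sketch (editor's addition, not part of the original file):
// the typical latency use case mentioned above, with a made-up metric name
// and help string. It relies on the package's existing "time" import.
func exampleSummaryUsage() {
	requestLatency := NewSummary(SummaryOpts{
		Name: "http_request_duration_seconds", // illustrative name
		Help: "Latency of HTTP requests in seconds.",
	})
	start := time.Now()
	// ... serve the request ...
	requestLatency.Observe(time.Since(start).Seconds())
}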
|
||||
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
var (
|
||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||
)
|
||||
|
||||
// Default values for SummaryOpts.
|
||||
const (
|
||||
// DefMaxAge is the default duration for which observations stay
|
||||
// relevant.
|
||||
DefMaxAge time.Duration = 10 * time.Minute
|
||||
// DefAgeBuckets is the default number of buckets used to calculate the
|
||||
// age of observations.
|
||||
DefAgeBuckets = 5
|
||||
// DefBufCap is the standard buffer size for collecting Summary observations.
|
||||
DefBufCap = 500
|
||||
)
|
||||
|
||||
// SummaryOpts bundles the options for creating a Summary metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value.
|
||||
type SummaryOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Summary (created by joining these components with
|
||||
// "_"). Only Name is mandatory, the others merely help structuring the
|
||||
// name. Note that the fully-qualified name of the Summary must be a
|
||||
// valid Prometheus metric name.
|
||||
Namespace string
|
||||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Summary. Mandatory!
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
Help string
|
||||
|
||||
// ConstLabels are used to attach fixed labels to this
|
||||
// Summary. Summaries with the same fully-qualified name must have the
|
||||
// same label names in their ConstLabels.
|
||||
//
|
||||
// Note that in most cases, labels have a value that varies during the
|
||||
// lifetime of a process. Those labels are usually managed with a
|
||||
// SummaryVec. ConstLabels serve only special purposes. One is for the
|
||||
// special case where the value of a label does not change during the
|
||||
// lifetime of a process, e.g. if the revision of the running binary is
|
||||
// put into a label. Another, more advanced purpose is if more than one
|
||||
// Collector needs to collect Summaries with the same fully-qualified
|
||||
// name. In that case, those Summaries must differ in the values of
|
||||
// their ConstLabels. See the Collector examples.
|
||||
//
|
||||
// If the value of a label never changes (not even between binaries),
|
||||
// that label most likely should not be a label at all (but part of the
|
||||
// metric name).
|
||||
ConstLabels Labels
|
||||
|
||||
// Objectives defines the quantile rank estimates with their respective
|
||||
// absolute error. The default value is DefObjectives.
|
||||
Objectives map[float64]float64
|
||||
|
||||
// MaxAge defines the duration for which an observation stays relevant
|
||||
// for the summary. Must be positive. The default value is DefMaxAge.
|
||||
MaxAge time.Duration
|
||||
|
||||
// AgeBuckets is the number of buckets used to exclude observations that
|
||||
// are older than MaxAge from the summary. A higher number has a
|
||||
// resource penalty, so only increase it if the higher resolution is
|
||||
// really required. The default value is DefAgeBuckets.
|
||||
AgeBuckets uint32
|
||||
|
||||
// BufCap defines the default sample stream buffer size. The default
|
||||
// value of DefBufCap should suffice for most uses. If there is a need
|
||||
// to increase the value, a multiple of 500 is recommended (because that
|
||||
// is the internal buffer size of the underlying package
|
||||
// "github.com/bmizerany/perks/quantile").
|
||||
BufCap uint32
|
||||
|
||||
// Epsilon is the error epsilon for the quantile rank estimate. Must be
|
||||
// positive. The default is DefEpsilon.
|
||||
Epsilon float64
|
||||
}
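// A hedged sketch (editor's addition) of how the optional fields above might
// be tuned: a Summary tracking only the 95th percentile with a tighter rank
// error and a shorter sliding window. Every value here is illustrative only.
func exampleTunedSummaryOpts() SummaryOpts {
	return SummaryOpts{
		Namespace:  "myapp",                          // assumed application namespace
		Name:       "queue_wait_seconds",             // assumed metric name
		Help:       "Time requests spend queued.",
		Objectives: map[float64]float64{0.95: 0.005}, // 95th percentile, 0.5% rank error
		MaxAge:     5 * time.Minute,                  // overrides DefMaxAge
		AgeBuckets: 10,                               // overrides DefAgeBuckets
	}
}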
|
||||
|
||||
// TODO: There is a serious problem with the sliding-window decay algorithm: the Merge
|
||||
// method of perks/quantile is actually not working as advertised - and it might
|
||||
// be unfixable, as the underlying algorithm is apparently not capable of
|
||||
// merging summaries in the first place. To avoid using Merge, we are currently
|
||||
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// essentially multiplied by the number of age buckets. When rotating age
|
||||
// buckets, we empty the previous head stream. On scrape time, we simply take
|
||||
// the quantiles from the head stream (no merging required). Result: More effort
|
||||
// on observation time, less effort on scrape time, which is exactly the
|
||||
// opposite of what we try to accomplish, but at least the results are correct.
|
||||
//
|
||||
// The quite elegant previous contraption to merge the age buckets efficiently
|
||||
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
|
||||
// can't be used anymore.
|
||||
|
||||
// NewSummary creates a new Summary based on the provided SummaryOpts.
|
||||
func NewSummary(opts SummaryOpts) Summary {
|
||||
return newSummary(
|
||||
NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
),
|
||||
opts,
|
||||
)
|
||||
}
|
||||
|
||||
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
panic(errInconsistentCardinality)
|
||||
}
|
||||
|
||||
if len(opts.Objectives) == 0 {
|
||||
opts.Objectives = DefObjectives
|
||||
}
|
||||
|
||||
if opts.MaxAge < 0 {
|
||||
panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
|
||||
}
|
||||
if opts.MaxAge == 0 {
|
||||
opts.MaxAge = DefMaxAge
|
||||
}
|
||||
|
||||
if opts.AgeBuckets == 0 {
|
||||
opts.AgeBuckets = DefAgeBuckets
|
||||
}
|
||||
|
||||
if opts.BufCap == 0 {
|
||||
opts.BufCap = DefBufCap
|
||||
}
|
||||
|
||||
s := &summary{
|
||||
desc: desc,
|
||||
|
||||
objectives: opts.Objectives,
|
||||
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
|
||||
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
|
||||
hotBuf: make([]float64, 0, opts.BufCap),
|
||||
coldBuf: make([]float64, 0, opts.BufCap),
|
||||
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
|
||||
}
|
||||
s.headStreamExpTime = time.Now().Add(s.streamDuration)
|
||||
s.hotBufExpTime = s.headStreamExpTime
|
||||
|
||||
for i := uint32(0); i < opts.AgeBuckets; i++ {
|
||||
s.streams = append(s.streams, s.newStream())
|
||||
}
|
||||
s.headStream = s.streams[0]
|
||||
|
||||
for qu := range s.objectives {
|
||||
s.sortedObjectives = append(s.sortedObjectives, qu)
|
||||
}
|
||||
sort.Float64s(s.sortedObjectives)
|
||||
|
||||
s.Init(s) // Init self-collection.
|
||||
return s
|
||||
}
|
||||
|
||||
type summary struct {
|
||||
SelfCollector
|
||||
|
||||
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
||||
mtx sync.Mutex // Protects every other moving part.
|
||||
// Lock bufMtx before mtx if both are needed.
|
||||
|
||||
desc *Desc
|
||||
|
||||
objectives map[float64]float64
|
||||
sortedObjectives []float64
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
|
||||
sum float64
|
||||
cnt uint64
|
||||
|
||||
hotBuf, coldBuf []float64
|
||||
|
||||
streams []*quantile.Stream
|
||||
streamDuration time.Duration
|
||||
headStream *quantile.Stream
|
||||
headStreamIdx int
|
||||
headStreamExpTime, hotBufExpTime time.Time
|
||||
}
|
||||
|
||||
func (s *summary) Desc() *Desc {
|
||||
return s.desc
|
||||
}
|
||||
|
||||
func (s *summary) Observe(v float64) {
|
||||
s.bufMtx.Lock()
|
||||
defer s.bufMtx.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
if now.After(s.hotBufExpTime) {
|
||||
s.asyncFlush(now)
|
||||
}
|
||||
s.hotBuf = append(s.hotBuf, v)
|
||||
if len(s.hotBuf) == cap(s.hotBuf) {
|
||||
s.asyncFlush(now)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *summary) Write(out *dto.Metric) error {
|
||||
sum := &dto.Summary{}
|
||||
qs := make([]*dto.Quantile, 0, len(s.objectives))
|
||||
|
||||
s.bufMtx.Lock()
|
||||
s.mtx.Lock()
|
||||
|
||||
if len(s.hotBuf) != 0 {
|
||||
s.swapBufs(time.Now())
|
||||
}
|
||||
s.bufMtx.Unlock()
|
||||
|
||||
s.flushColdBuf()
|
||||
sum.SampleCount = proto.Uint64(s.cnt)
|
||||
sum.SampleSum = proto.Float64(s.sum)
|
||||
|
||||
for _, rank := range s.sortedObjectives {
|
||||
qs = append(qs, &dto.Quantile{
|
||||
Quantile: proto.Float64(rank),
|
||||
Value: proto.Float64(s.headStream.Query(rank)),
|
||||
})
|
||||
}
|
||||
|
||||
s.mtx.Unlock()
|
||||
|
||||
if len(qs) > 0 {
|
||||
sort.Sort(quantSort(qs))
|
||||
}
|
||||
sum.Quantile = qs
|
||||
|
||||
out.Summary = sum
|
||||
out.Label = s.labelPairs
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *summary) newStream() *quantile.Stream {
|
||||
return quantile.NewTargeted(s.objectives)
|
||||
}
|
||||
|
||||
// asyncFlush needs bufMtx locked.
|
||||
func (s *summary) asyncFlush(now time.Time) {
|
||||
s.mtx.Lock()
|
||||
s.swapBufs(now)
|
||||
|
||||
// Unblock the original goroutine that was responsible for the mutation
|
||||
// that triggered the compaction. But hold onto the global non-buffer
|
||||
// state mutex until the operation finishes.
|
||||
go func() {
|
||||
s.flushColdBuf()
|
||||
s.mtx.Unlock()
|
||||
}()
|
||||
}
|
||||
|
||||
// maybeRotateStreams needs mtx AND bufMtx locked.
|
||||
func (s *summary) maybeRotateStreams() {
|
||||
for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
|
||||
s.headStream.Reset()
|
||||
s.headStreamIdx++
|
||||
if s.headStreamIdx >= len(s.streams) {
|
||||
s.headStreamIdx = 0
|
||||
}
|
||||
s.headStream = s.streams[s.headStreamIdx]
|
||||
s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
|
||||
}
|
||||
}
|
||||
|
||||
// flushColdBuf needs mtx locked.
|
||||
func (s *summary) flushColdBuf() {
|
||||
for _, v := range s.coldBuf {
|
||||
for _, stream := range s.streams {
|
||||
stream.Insert(v)
|
||||
}
|
||||
s.cnt++
|
||||
s.sum += v
|
||||
}
|
||||
s.coldBuf = s.coldBuf[0:0]
|
||||
s.maybeRotateStreams()
|
||||
}
|
||||
|
||||
// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
|
||||
func (s *summary) swapBufs(now time.Time) {
|
||||
if len(s.coldBuf) != 0 {
|
||||
panic("coldBuf is not empty")
|
||||
}
|
||||
s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
|
||||
// hotBuf is now empty and gets new expiration set.
|
||||
for now.After(s.hotBufExpTime) {
|
||||
s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
|
||||
}
|
||||
}
|
||||
|
||||
type quantSort []*dto.Quantile
|
||||
|
||||
func (s quantSort) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s quantSort) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s quantSort) Less(i, j int) bool {
|
||||
return s[i].GetQuantile() < s[j].GetQuantile()
|
||||
}
|
||||
|
||||
// SummaryVec is a Collector that bundles a set of Summaries that all share the
|
||||
// same Desc, but have different values for their variable labels. This is used
|
||||
// if you want to count the same thing partitioned by various dimensions
|
||||
// (e.g. http request latencies, partitioned by status code and method). Create
|
||||
// instances with NewSummaryVec.
|
||||
type SummaryVec struct {
|
||||
MetricVec
|
||||
}
|
||||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &SummaryVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Summary and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Summary), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Summary and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Summary), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42)
|
||||
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Summary)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWith would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42)
|
||||
func (m *SummaryVec) With(labels Labels) Summary {
|
||||
return m.MetricVec.With(labels).(Summary)
|
||||
}
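// A hedged usage sketch (editor's addition, not in the original file): a
// SummaryVec partitioned by HTTP method and status code. The metric name and
// label values are illustrative only.
func exampleSummaryVecUsage() {
	latencies := NewSummaryVec(
		SummaryOpts{
			Name: "http_request_duration_seconds",
			Help: "Request latency partitioned by method and code.",
		},
		[]string{"method", "code"},
	)
	// Positional label values...
	latencies.WithLabelValues("GET", "200").Observe(0.042)
	// ...or keyed by label name, which is immune to ordering mistakes.
	latencies.With(Labels{"method": "POST", "code": "404"}).Observe(0.123)
}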
|
||||
328
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go
generated
vendored
Normal file
@@ -0,0 +1,328 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func benchmarkSummaryObserve(w int, b *testing.B) {
|
||||
b.StopTimer()
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(w)
|
||||
|
||||
g := new(sync.WaitGroup)
|
||||
g.Add(1)
|
||||
|
||||
s := NewSummary(SummaryOpts{})
|
||||
|
||||
for i := 0; i < w; i++ {
|
||||
go func() {
|
||||
g.Wait()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Observe(float64(i))
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
g.Done()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func BenchmarkSummaryObserve1(b *testing.B) {
|
||||
benchmarkSummaryObserve(1, b)
|
||||
}
|
||||
|
||||
func BenchmarkSummaryObserve2(b *testing.B) {
|
||||
benchmarkSummaryObserve(2, b)
|
||||
}
|
||||
|
||||
func BenchmarkSummaryObserve4(b *testing.B) {
|
||||
benchmarkSummaryObserve(4, b)
|
||||
}
|
||||
|
||||
func BenchmarkSummaryObserve8(b *testing.B) {
|
||||
benchmarkSummaryObserve(8, b)
|
||||
}
|
||||
|
||||
func benchmarkSummaryWrite(w int, b *testing.B) {
|
||||
b.StopTimer()
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(w)
|
||||
|
||||
g := new(sync.WaitGroup)
|
||||
g.Add(1)
|
||||
|
||||
s := NewSummary(SummaryOpts{})
|
||||
|
||||
for i := 0; i < 1000000; i++ {
|
||||
s.Observe(float64(i))
|
||||
}
|
||||
|
||||
for j := 0; j < w; j++ {
|
||||
outs := make([]dto.Metric, b.N)
|
||||
|
||||
go func(o []dto.Metric) {
|
||||
g.Wait()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Write(&o[i])
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}(outs)
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
g.Done()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func BenchmarkSummaryWrite1(b *testing.B) {
|
||||
benchmarkSummaryWrite(1, b)
|
||||
}
|
||||
|
||||
func BenchmarkSummaryWrite2(b *testing.B) {
|
||||
benchmarkSummaryWrite(2, b)
|
||||
}
|
||||
|
||||
func BenchmarkSummaryWrite4(b *testing.B) {
|
||||
benchmarkSummaryWrite(4, b)
|
||||
}
|
||||
|
||||
func BenchmarkSummaryWrite8(b *testing.B) {
|
||||
benchmarkSummaryWrite(8, b)
|
||||
}
|
||||
|
||||
func TestSummaryConcurrency(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
|
||||
it := func(n uint32) bool {
|
||||
mutations := int(n%1e4 + 1e4)
|
||||
concLevel := int(n%5 + 1)
|
||||
total := mutations * concLevel
|
||||
|
||||
var start, end sync.WaitGroup
|
||||
start.Add(1)
|
||||
end.Add(concLevel)
|
||||
|
||||
sum := NewSummary(SummaryOpts{
|
||||
Name: "test_summary",
|
||||
Help: "helpless",
|
||||
})
|
||||
|
||||
allVars := make([]float64, total)
|
||||
var sampleSum float64
|
||||
for i := 0; i < concLevel; i++ {
|
||||
vals := make([]float64, mutations)
|
||||
for j := 0; j < mutations; j++ {
|
||||
v := rand.NormFloat64()
|
||||
vals[j] = v
|
||||
allVars[i*mutations+j] = v
|
||||
sampleSum += v
|
||||
}
|
||||
|
||||
go func(vals []float64) {
|
||||
start.Wait()
|
||||
for _, v := range vals {
|
||||
sum.Observe(v)
|
||||
}
|
||||
end.Done()
|
||||
}(vals)
|
||||
}
|
||||
sort.Float64s(allVars)
|
||||
start.Done()
|
||||
end.Wait()
|
||||
|
||||
m := &dto.Metric{}
|
||||
sum.Write(m)
|
||||
if got, want := int(*m.Summary.SampleCount), total; got != want {
|
||||
t.Errorf("got sample count %d, want %d", got, want)
|
||||
}
|
||||
if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
|
||||
t.Errorf("got sample sum %f, want %f", got, want)
|
||||
}
|
||||
|
||||
objectives := make([]float64, 0, len(DefObjectives))
|
||||
for qu := range DefObjectives {
|
||||
objectives = append(objectives, qu)
|
||||
}
|
||||
sort.Float64s(objectives)
|
||||
|
||||
for i, wantQ := range objectives {
|
||||
ε := DefObjectives[wantQ]
|
||||
gotQ := *m.Summary.Quantile[i].Quantile
|
||||
gotV := *m.Summary.Quantile[i].Value
|
||||
min, max := getBounds(allVars, wantQ, ε)
|
||||
if gotQ != wantQ {
|
||||
t.Errorf("got quantile %f, want %f", gotQ, wantQ)
|
||||
}
|
||||
if gotV < min || gotV > max {
|
||||
t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if err := quick.Check(it, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummaryVecConcurrency(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
|
||||
objectives := make([]float64, 0, len(DefObjectives))
|
||||
for qu := range DefObjectives {
|
||||
|
||||
objectives = append(objectives, qu)
|
||||
}
|
||||
sort.Float64s(objectives)
|
||||
|
||||
it := func(n uint32) bool {
|
||||
mutations := int(n%1e4 + 1e4)
|
||||
concLevel := int(n%7 + 1)
|
||||
vecLength := int(n%3 + 1)
|
||||
|
||||
var start, end sync.WaitGroup
|
||||
start.Add(1)
|
||||
end.Add(concLevel)
|
||||
|
||||
sum := NewSummaryVec(
|
||||
SummaryOpts{
|
||||
Name: "test_summary",
|
||||
Help: "helpless",
|
||||
},
|
||||
[]string{"label"},
|
||||
)
|
||||
|
||||
allVars := make([][]float64, vecLength)
|
||||
sampleSums := make([]float64, vecLength)
|
||||
for i := 0; i < concLevel; i++ {
|
||||
vals := make([]float64, mutations)
|
||||
picks := make([]int, mutations)
|
||||
for j := 0; j < mutations; j++ {
|
||||
v := rand.NormFloat64()
|
||||
vals[j] = v
|
||||
pick := rand.Intn(vecLength)
|
||||
picks[j] = pick
|
||||
allVars[pick] = append(allVars[pick], v)
|
||||
sampleSums[pick] += v
|
||||
}
|
||||
|
||||
go func(vals []float64) {
|
||||
start.Wait()
|
||||
for i, v := range vals {
|
||||
sum.WithLabelValues(string('A' + picks[i])).Observe(v)
|
||||
}
|
||||
end.Done()
|
||||
}(vals)
|
||||
}
|
||||
for _, vars := range allVars {
|
||||
sort.Float64s(vars)
|
||||
}
|
||||
start.Done()
|
||||
end.Wait()
|
||||
|
||||
for i := 0; i < vecLength; i++ {
|
||||
m := &dto.Metric{}
|
||||
s := sum.WithLabelValues(string('A' + i))
|
||||
s.Write(m)
|
||||
if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
|
||||
t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
|
||||
}
|
||||
if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
|
||||
t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
|
||||
}
|
||||
for j, wantQ := range objectives {
|
||||
ε := DefObjectives[wantQ]
|
||||
gotQ := *m.Summary.Quantile[j].Quantile
|
||||
gotV := *m.Summary.Quantile[j].Value
|
||||
min, max := getBounds(allVars[i], wantQ, ε)
|
||||
if gotQ != wantQ {
|
||||
t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
|
||||
}
|
||||
if gotV < min || gotV > max {
|
||||
t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if err := quick.Check(it, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummaryDecay(t *testing.T) {
|
||||
sum := NewSummary(SummaryOpts{
|
||||
Name: "test_summary",
|
||||
Help: "helpless",
|
||||
MaxAge: 100 * time.Millisecond,
|
||||
Objectives: map[float64]float64{0.1: 0.001},
|
||||
AgeBuckets: 10,
|
||||
})
|
||||
|
||||
m := &dto.Metric{}
|
||||
i := 0
|
||||
tick := time.NewTicker(time.Millisecond)
|
||||
for _ = range tick.C {
|
||||
i++
|
||||
sum.Observe(float64(i))
|
||||
if i%10 == 0 {
|
||||
sum.Write(m)
|
||||
if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
|
||||
t.Errorf("%d. got %f, want %f", i, got, want)
|
||||
}
|
||||
m.Reset()
|
||||
}
|
||||
if i >= 1000 {
|
||||
break
|
||||
}
|
||||
}
|
||||
tick.Stop()
|
||||
}
|
||||
|
||||
func getBounds(vars []float64, q, ε float64) (min, max float64) {
|
||||
// TODO: This currently tolerates an error of up to 2*ε. The error must
|
||||
// be at most ε, but for some reason, it's sometimes slightly
|
||||
// higher. That's a bug.
|
||||
n := float64(len(vars))
|
||||
lower := int((q - 2*ε) * n)
|
||||
upper := int(math.Ceil((q + 2*ε) * n))
|
||||
min = vars[0]
|
||||
if lower > 1 {
|
||||
min = vars[lower-1]
|
||||
}
|
||||
max = vars[len(vars)-1]
|
||||
if upper < len(vars) {
|
||||
max = vars[upper-1]
|
||||
}
|
||||
return
|
||||
}
|
||||
145
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Untyped is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
// An Untyped metric works the same as a Gauge. The only difference is that
|
||||
// no type information is implied.
|
||||
//
|
||||
// To create Untyped instances, use NewUntyped.
|
||||
type Untyped interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Set sets the Untyped metric to an arbitrary value.
|
||||
Set(float64)
|
||||
// Inc increments the Untyped metric by 1.
|
||||
Inc()
|
||||
// Dec decrements the Untyped metric by 1.
|
||||
Dec()
|
||||
// Add adds the given value to the Untyped metric. (The value can be
|
||||
// negative, resulting in a decrease.)
|
||||
Add(float64)
|
||||
// Sub subtracts the given value from the Untyped metric. (The value can
|
||||
// be negative, resulting in an increase.)
|
||||
Sub(float64)
|
||||
}
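// Hedged illustration (editor's sketch, not part of the original file): an
// Untyped metric is manipulated exactly like a Gauge but advertises no type
// to Prometheus. The metric name is made up for the example.
func exampleUntypedUsage() {
	inFlight := NewUntyped(UntypedOpts{
		Name: "inflight_requests",
		Help: "Requests currently being served (type intentionally unspecified).",
	})
	inFlight.Inc()
	inFlight.Add(3)
	inFlight.Dec()
	inFlight.Set(0)
}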
|
||||
|
||||
// UntypedOpts is an alias for Opts. See there for doc comments.
|
||||
type UntypedOpts Opts
|
||||
|
||||
// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
|
||||
func NewUntyped(opts UntypedOpts) Untyped {
|
||||
return newValue(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), UntypedValue, 0)
|
||||
}
|
||||
|
||||
// UntypedVec is a Collector that bundles a set of Untyped metrics that all
|
||||
// share the same Desc, but have different values for their variable
|
||||
// labels. This is used if you want to count the same thing partitioned by
|
||||
// various dimensions. Create instances with NewUntypedVec.
|
||||
type UntypedVec struct {
|
||||
MetricVec
|
||||
}
|
||||
|
||||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &UntypedVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns an Untyped and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Untyped), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns an Untyped and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Untyped), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Untyped)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWith would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (m *UntypedVec) With(labels Labels) Untyped {
|
||||
return m.MetricVec.With(labels).(Untyped)
|
||||
}
|
||||
|
||||
// UntypedFunc is an Untyped whose value is determined at collect time by
|
||||
// calling a provided function.
|
||||
//
|
||||
// To create UntypedFunc instances, use NewUntypedFunc.
|
||||
type UntypedFunc interface {
|
||||
Metric
|
||||
Collector
|
||||
}
|
||||
|
||||
// NewUntypedFunc creates a new UntypedFunc based on the provided
|
||||
// UntypedOpts. The value reported is determined by calling the given function
|
||||
// from within the Write method. Take into account that metric collection may
|
||||
// happen concurrently. If that results in concurrent calls to Write, like in
|
||||
// the case where an UntypedFunc is directly registered with Prometheus, the
|
||||
// provided function must be concurrency-safe.
|
||||
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), UntypedValue, function)
|
||||
}
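// A hedged sketch (editor's addition): exposing an existing value source as
// an UntypedFunc. The closure is called at collect time, so it must be safe
// for concurrent use; the queueLen parameter is purely illustrative.
func exampleUntypedFunc(queueLen func() int) UntypedFunc {
	return NewUntypedFunc(
		UntypedOpts{
			Name: "worker_queue_length",
			Help: "Current length of the worker queue.",
		},
		func() float64 { return float64(queueLen()) },
	)
}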
|
||||
230
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
Normal file
@@ -0,0 +1,230 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
// ValueType is an enumeration of metric types that represent a simple value.
|
||||
type ValueType int
|
||||
|
||||
// Possible values for the ValueType enum.
|
||||
const (
|
||||
_ ValueType = iota
|
||||
CounterValue
|
||||
GaugeValue
|
||||
UntypedValue
|
||||
)
|
||||
|
||||
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
||||
|
||||
// value is a generic metric for simple values. It implements Metric, Collector,
|
||||
// Counter, Gauge, and Untyped. Its effective type is determined by
|
||||
// ValueType. This is a low-level building block used by the library to back the
|
||||
// implementations of Counter, Gauge, and Untyped.
|
||||
type value struct {
|
||||
SelfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
valBits uint64 // These are the bits of the represented float64 value.
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
// newValue returns a newly allocated value with the given Desc, ValueType,
|
||||
// sample value and label values. It panics if the number of label
|
||||
// values is different from the number of variable labels in Desc.
|
||||
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
|
||||
if len(labelValues) != len(desc.variableLabels) {
|
||||
panic(errInconsistentCardinality)
|
||||
}
|
||||
result := &value{
|
||||
desc: desc,
|
||||
valType: valueType,
|
||||
valBits: math.Float64bits(val),
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
result.Init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
func (v *value) Desc() *Desc {
|
||||
return v.desc
|
||||
}
|
||||
|
||||
func (v *value) Set(val float64) {
|
||||
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
|
||||
}
|
||||
|
||||
func (v *value) Inc() {
|
||||
v.Add(1)
|
||||
}
|
||||
|
||||
func (v *value) Dec() {
|
||||
v.Add(-1)
|
||||
}
|
||||
|
||||
func (v *value) Add(val float64) {
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&v.valBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
|
||||
if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v *value) Sub(val float64) {
|
||||
v.Add(val * -1)
|
||||
}
|
||||
|
||||
func (v *value) Write(out *dto.Metric) error {
|
||||
val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
|
||||
return populateMetric(v.valType, val, v.labelPairs, out)
|
||||
}
|
||||
|
||||
// valueFunc is a generic metric for simple values retrieved on collect time
|
||||
// from a function. It implements Metric and Collector. Its effective type is
|
||||
// determined by ValueType. This is a low-level building block used by the
|
||||
// library to back the implementations of CounterFunc, GaugeFunc, and
|
||||
// UntypedFunc.
|
||||
type valueFunc struct {
|
||||
SelfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
function func() float64
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
// newValueFunc returns a newly allocated valueFunc with the given Desc and
|
||||
// ValueType. The value reported is determined by calling the given function
|
||||
// from within the Write method. Take into account that metric collection may
|
||||
// happen concurrently. If that results in concurrent calls to Write, like in
|
||||
// the case where a valueFunc is directly registered with Prometheus, the
|
||||
// provided function must be concurrency-safe.
|
||||
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
|
||||
result := &valueFunc{
|
||||
desc: desc,
|
||||
valType: valueType,
|
||||
function: function,
|
||||
labelPairs: makeLabelPairs(desc, nil),
|
||||
}
|
||||
result.Init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
func (v *valueFunc) Desc() *Desc {
|
||||
return v.desc
|
||||
}
|
||||
|
||||
func (v *valueFunc) Write(out *dto.Metric) error {
|
||||
return populateMetric(v.valType, v.function(), v.labelPairs, out)
|
||||
}
|
||||
|
||||
// NewConstMetric returns a metric with one fixed value that cannot be
|
||||
// changed. Users of this package will not have much use for it in regular
|
||||
// operations. However, when implementing custom Collectors, it is useful as a
|
||||
// throw-away metric that is generated on the fly to send it to Prometheus in
|
||||
// the Collect method. NewConstMetric returns an error if the length of
|
||||
// labelValues is not consistent with the variable labels in Desc.
|
||||
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
return nil, errInconsistentCardinality
|
||||
}
|
||||
return &constMetric{
|
||||
desc: desc,
|
||||
valType: valueType,
|
||||
val: value,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MustNewConstMetric is a version of NewConstMetric that panics where
|
||||
// NewConstMetric would have returned an error.
|
||||
func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
|
||||
m, err := NewConstMetric(desc, valueType, value, labelValues...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return m
|
||||
}
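// Hedged illustration (editor's sketch, not part of the original file):
// inside a custom Collector, throw-away metrics are typically emitted from
// Collect like this. The Desc and the sampled value are assumed to come from
// the surrounding Collector implementation.
func exampleEmitConstMetric(ch chan<- Metric, desc *Desc, bytesRead float64) {
	// CounterValue marks the sample as a counter; GaugeValue and UntypedValue
	// work the same way via populateMetric below.
	ch <- MustNewConstMetric(desc, CounterValue, bytesRead)
}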
|
||||
|
||||
type constMetric struct {
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
val float64
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
func (m *constMetric) Desc() *Desc {
|
||||
return m.desc
|
||||
}
|
||||
|
||||
func (m *constMetric) Write(out *dto.Metric) error {
|
||||
return populateMetric(m.valType, m.val, m.labelPairs, out)
|
||||
}
|
||||
|
||||
func populateMetric(
|
||||
t ValueType,
|
||||
v float64,
|
||||
labelPairs []*dto.LabelPair,
|
||||
m *dto.Metric,
|
||||
) error {
|
||||
m.Label = labelPairs
|
||||
switch t {
|
||||
case CounterValue:
|
||||
m.Counter = &dto.Counter{Value: proto.Float64(v)}
|
||||
case GaugeValue:
|
||||
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
|
||||
case UntypedValue:
|
||||
m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
|
||||
default:
|
||||
return fmt.Errorf("encountered unknown type %v", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
|
||||
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
|
||||
if totalLen == 0 {
|
||||
// Super fast path.
|
||||
return nil
|
||||
}
|
||||
if len(desc.variableLabels) == 0 {
|
||||
// Moderately fast path.
|
||||
return desc.constLabelPairs
|
||||
}
|
||||
labelPairs := make([]*dto.LabelPair, 0, totalLen)
|
||||
for i, n := range desc.variableLabels {
|
||||
labelPairs = append(labelPairs, &dto.LabelPair{
|
||||
Name: proto.String(n),
|
||||
Value: proto.String(labelValues[i]),
|
||||
})
|
||||
}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
labelPairs = append(labelPairs, lp)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(labelPairs))
|
||||
return labelPairs
|
||||
}
|
||||
241
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
Normal file
@@ -0,0 +1,241 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MetricVec is a Collector to bundle metrics of the same name that
|
||||
// differ in their label values. MetricVec is usually not used directly but as a
|
||||
// building block for implementations of vectors of a given metric
|
||||
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
|
||||
// provided in this package.
|
||||
type MetricVec struct {
|
||||
mtx sync.RWMutex // Protects not only children, but also hash and buf.
|
||||
children map[uint64]Metric
|
||||
desc *Desc
|
||||
|
||||
// hash is our own hash instance to avoid repeated allocations.
|
||||
hash hash.Hash64
|
||||
// buf is used to copy string contents into it for hashing,
|
||||
// again to avoid allocations.
|
||||
buf bytes.Buffer
|
||||
|
||||
newMetric func(labelValues ...string) Metric
|
||||
}
|
||||
|
||||
// Describe implements Collector. It sends exactly one Desc to the provided
|
||||
// channel.
|
||||
func (m *MetricVec) Describe(ch chan<- *Desc) {
|
||||
ch <- m.desc
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
|
||||
for _, metric := range m.children {
|
||||
ch <- metric
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues returns the Metric for the given slice of label
|
||||
// values (same order as the VariableLabels in Desc). If that combination of
|
||||
// label values is accessed for the first time, a new Metric is created.
|
||||
// Keeping the Metric for later use is possible (and should be considered if
|
||||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||
// Delete can be used to delete the Metric from the MetricVec. In that case, the
|
||||
// Metric will still exist, but it will not be exported anymore, even if a
|
||||
// Metric with the same label values is created later. See also the CounterVec
|
||||
// example.
|
||||
//
|
||||
// An error is returned if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc.
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
}
|
||||
|
||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||
// must match those of the VariableLabels in Desc). If that label map is
|
||||
// accessed for the first time, a new Metric is created. Implications of keeping
|
||||
// the Metric are the same as for GetMetricWithLabelValues.
|
||||
//
|
||||
// An error is returned if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in Desc.
|
||||
//
|
||||
// This method is used for the same purpose as
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lvs := make([]string, len(labels))
|
||||
for i, label := range m.desc.variableLabels {
|
||||
lvs[i] = labels[label]
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
||||
// occurs. The method allows neat syntax like:
|
||||
// httpReqs.WithLabelValues("404", "POST").Inc()
|
||||
func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
|
||||
metric, err := m.GetMetricWithLabelValues(lvs...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics if an error occurs. The method allows
|
||||
// neat syntax like:
|
||||
// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
|
||||
func (m *MetricVec) With(labels Labels) Metric {
|
||||
metric, err := m.GetMetricWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return metric
|
||||
}
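// A hedged sketch (editor's addition) contrasting the two lookup styles on a
// MetricVec-based vector. It assumes the vector was created with the variable
// labels "method" and "code"; all values are illustrative.
func exampleVecLookups(vec *MetricVec) {
	// Positional: fast, but argument order must match the variable labels.
	_ = vec.WithLabelValues("GET", "200")
	// Keyed: more verbose, but immune to ordering mistakes.
	_ = vec.With(Labels{"method": "GET", "code": "200"})
	// Non-panicking lookup, preferable when the label input is not trusted.
	if m, err := vec.GetMetricWith(Labels{"method": "GET", "code": "200"}); err == nil {
		_ = m
	}
}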
|
||||
|
||||
// DeleteLabelValues removes the metric where the variable labels are the same
|
||||
// as those passed in as labels (same order as the VariableLabels in Desc). It
|
||||
// returns true if a metric was deleted.
|
||||
//
|
||||
// It is not an error if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc. However, such inconsistent label count can
|
||||
// never match an actual Metric, so the method will always return false in that
|
||||
// case.
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
|
||||
// alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the CounterVec example.
|
||||
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
return true
|
||||
}
|
||||
|
||||
// Delete deletes the metric where the variable labels are the same as those
|
||||
// passed in as labels. It returns true if a metric was deleted.
|
||||
//
|
||||
// It is not an error if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in the Desc of the MetricVec. However, such
|
||||
// inconsistent Labels can never match an actual Metric, so the method will
|
||||
// always return false in that case.
|
||||
//
|
||||
// This method is used for the same purpose as DeleteLabelValues(...string). See
|
||||
// there for pros and cons of the two methods.
|
||||
func (m *MetricVec) Delete(labels Labels) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
return true
|
||||
}
|
||||
|
||||
// Reset deletes all metrics in this vector.
|
||||
func (m *MetricVec) Reset() {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
for h := range m.children {
|
||||
delete(m.children, h)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||
if len(vals) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
for _, val := range vals {
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
||||
if len(labels) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
for _, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||
}
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
|
||||
metric, ok := m.children[hash]
|
||||
if !ok {
|
||||
// Copy labelValues. Otherwise, they would be allocated even if we don't go
|
||||
// down this code path.
|
||||
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
|
||||
metric = m.newMetric(copiedLabelValues...)
|
||||
m.children[hash] = metric
|
||||
}
|
||||
return metric
|
||||
}
|
||||
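
// The sketch below is illustrative only and not part of the vendored file.
// It shows how the MetricVec machinery above is typically reached through a
// user-facing vector type; the CounterVec/CounterOpts constructor call, the
// metric name, and the label values are assumptions for the example.
func exampleCounterVecUsage() {
	httpReqs := NewCounterVec(
		CounterOpts{Name: "http_requests_total", Help: "HTTP requests, partitioned by status and method."},
		[]string{"status", "method"},
	)
	// Fast, order-sensitive access; panics if the label count is wrong.
	httpReqs.WithLabelValues("404", "POST").Inc()
	// More readable (but slower) map-based access.
	httpReqs.With(Labels{"status": "404", "method": "POST"}).Inc()
	// Remove the child metric again; returns true if it existed.
	httpReqs.DeleteLabelValues("404", "POST")
}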
91
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"hash/fnv"
	"testing"
)

func TestDelete(t *testing.T) {
	desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil)
	vec := MetricVec{
		children: map[uint64]Metric{},
		desc:     desc,
		hash:     fnv.New64a(),
		newMetric: func(lvs ...string) Metric {
			return newValue(desc, UntypedValue, 0, lvs...)
		},
	}

	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}

func TestDeleteLabelValues(t *testing.T) {
	desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil)
	vec := MetricVec{
		children: map[uint64]Metric{},
		desc:     desc,
		hash:     fnv.New64a(),
		newMetric: func(lvs ...string) Metric {
			return newValue(desc, UntypedValue, 0, lvs...)
		},
	}

	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.DeleteLabelValues("v1"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}
168
Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package text
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/matttproud/golang_protobuf_extensions/ext"
|
||||
)
|
||||
|
||||
// Benchmarks to show how much penalty text format parsing actually inflicts.
|
||||
//
|
||||
// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
|
||||
//
|
||||
// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
|
||||
// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
|
||||
// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
|
||||
// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
|
||||
// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
|
||||
//
|
||||
// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
|
||||
// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),
|
||||
// the difference becomes less relevant, only ~4x.
|
||||
//
|
||||
// The test data contains 248 samples.
|
||||
//
|
||||
// BenchmarkProcessor002ParseOnly in the extraction package is not quite
|
||||
// comparable to the benchmarks here, but it gives an idea: JSON parsing is even
|
||||
// slower than text parsing and needs a comparable amount of allocs.
|
||||
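// Illustrative note, not part of the vendored file: numbers like the ones
// above can be reproduced with the standard Go tooling from within this
// package directory, e.g.
//
//	go test -bench . -benchmem
//
// Results will vary with hardware, Go version, and test data.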
|
||||
// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
|
||||
// family DTOs.
|
||||
func BenchmarkParseText(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/text")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
|
||||
// into metric family DTOs.
|
||||
func BenchmarkParseTextGzip(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/text.gz")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
in, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if _, err := parser.TextToMetricFamilies(in); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
|
||||
// metric family DTOs. Note that this does not build a map of metric families
|
||||
// (as the text version does), because it is not required for Prometheus
|
||||
// ingestion either. (However, it is required for the text-format parsing, as
|
||||
// the metric family might be sprinkled all over the text, while the
|
||||
// protobuf-format guarantees bundling at one place.)
|
||||
func BenchmarkParseProto(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/protobuf")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
family := &dto.MetricFamily{}
|
||||
in := bytes.NewReader(data)
|
||||
for {
|
||||
family.Reset()
|
||||
if _, err := ext.ReadDelimited(in, family); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
|
||||
// protobuf format.
|
||||
func BenchmarkParseProtoGzip(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/protobuf.gz")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
family := &dto.MetricFamily{}
|
||||
in, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
for {
|
||||
family.Reset()
|
||||
if _, err := ext.ReadDelimited(in, family); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
|
||||
// metric family DTOs into a map. This is not happening during Prometheus
|
||||
// ingestion. It is just here to measure the overhead of that map creation and
|
||||
// separate it from the overhead of the text format parsing.
|
||||
func BenchmarkParseProtoMap(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/protobuf")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
families := map[string]*dto.MetricFamily{}
|
||||
in := bytes.NewReader(data)
|
||||
for {
|
||||
family := &dto.MetricFamily{}
|
||||
if _, err := ext.ReadDelimited(in, family); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
b.Fatal(err)
|
||||
}
|
||||
families[family.GetName()] = family
|
||||
}
|
||||
}
|
||||
}
|
||||
265
Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go
generated
vendored
Normal file
@@ -0,0 +1,265 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package text contains helper functions to parse and create text-based
|
||||
// exchange formats. The package currently supports (only) version 0.0.4 of the
|
||||
// exchange format. Should other versions be supported in the future, some
|
||||
// versioning scheme has to be applied. Possibilities include separate packages
|
||||
// or separate functions. The best way depends on the nature of future changes,
|
||||
// which is the reason why no versioning scheme has been applied prematurely
|
||||
// here.
|
||||
package text
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
||||
// writes the resulting lines to 'out'. It returns the number of bytes written
|
||||
// and any error encountered. This function does not perform checks on the
|
||||
// content of the metric and label names, i.e. invalid metric or label names
|
||||
// will result in invalid text format output.
|
||||
// This method fulfills the type 'prometheus.encoder'.
|
||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||
var written int
|
||||
|
||||
// Fail-fast checks.
|
||||
if len(in.Metric) == 0 {
|
||||
return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
|
||||
}
|
||||
name := in.GetName()
|
||||
if name == "" {
|
||||
return written, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
}
|
||||
if in.Type == nil {
|
||||
return written, fmt.Errorf("MetricFamily has no type: %s", in)
|
||||
}
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
if in.Help != nil {
|
||||
n, err := fmt.Fprintf(
|
||||
out, "# HELP %s %s\n",
|
||||
name, escapeString(*in.Help, false),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
metricType := in.GetType()
|
||||
n, err := fmt.Fprintf(
|
||||
out, "# TYPE %s %s\n",
|
||||
name, strings.ToLower(metricType.String()),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
// Finally the samples, one line for each.
|
||||
for _, metric := range in.Metric {
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if metric.Counter == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected counter in metric %s", metric,
|
||||
)
|
||||
}
|
||||
n, err = writeSample(
|
||||
name, metric, "", "",
|
||||
metric.Counter.GetValue(),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_GAUGE:
|
||||
if metric.Gauge == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected gauge in metric %s", metric,
|
||||
)
|
||||
}
|
||||
n, err = writeSample(
|
||||
name, metric, "", "",
|
||||
metric.Gauge.GetValue(),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_UNTYPED:
|
||||
if metric.Untyped == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected untyped in metric %s", metric,
|
||||
)
|
||||
}
|
||||
n, err = writeSample(
|
||||
name, metric, "", "",
|
||||
metric.Untyped.GetValue(),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_SUMMARY:
|
||||
if metric.Summary == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected summary in metric %s", metric,
|
||||
)
|
||||
}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
n, err = writeSample(
|
||||
name, metric,
|
||||
"quantile", fmt.Sprint(q.GetQuantile()),
|
||||
q.GetValue(),
|
||||
out,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = writeSample(
|
||||
name+"_sum", metric, "", "",
|
||||
metric.Summary.GetSampleSum(),
|
||||
out,
|
||||
)
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
written += n
|
||||
n, err = writeSample(
|
||||
name+"_count", metric, "", "",
|
||||
float64(metric.Summary.GetSampleCount()),
|
||||
out,
|
||||
)
|
||||
default:
|
||||
return written, fmt.Errorf(
|
||||
"unexpected type in metric %s", metric,
|
||||
)
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
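// exampleWriteTextFormat is an illustrative sketch and not part of the
// vendored file: it renders an already populated MetricFamily in the text
// format and returns the result as a string. See create_test.go below for how
// such a family can be built with the goprotobuf helpers.
func exampleWriteTextFormat(mf *dto.MetricFamily) (string, error) {
	var buf bytes.Buffer // "bytes" is already imported by this file
	if _, err := MetricFamilyToText(&buf, mf); err != nil {
		return "", err
	}
	// For a counter family named "http_requests_total" with a single
	// unlabeled sample of 42, the output would look roughly like:
	//
	//	# HELP http_requests_total Total HTTP requests.
	//	# TYPE http_requests_total counter
	//	http_requests_total 42
	return buf.String(), nil
}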
|
||||
// writeSample writes a single sample in text format to out, given the metric
|
||||
// name, the metric proto message itself, optionally an additional label name
|
||||
// and value (use empty strings if not required), and the value. The function
|
||||
// returns the number of bytes written and any error encountered.
|
||||
func writeSample(
|
||||
name string,
|
||||
metric *dto.Metric,
|
||||
additionalLabelName, additionalLabelValue string,
|
||||
value float64,
|
||||
out io.Writer,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := fmt.Fprint(out, name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = labelPairsToText(
|
||||
metric.Label,
|
||||
additionalLabelName, additionalLabelValue,
|
||||
out,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = fmt.Fprintf(out, " %v", value)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if metric.TimestampMs != nil {
|
||||
n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = out.Write([]byte{'\n'})
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// labelPairsToText converts a slice of LabelPair proto messages plus the
|
||||
// explicitly given additional label pair into text formatted as required by the
|
||||
// text format and writes it to 'out'. An empty slice in combination with an
|
||||
// empty string 'additionalLabelName' results in nothing being
|
||||
// written. Otherwise, the label pairs are written, escaped as required by the
|
||||
// text format, and enclosed in '{...}'. The function returns the number of
|
||||
// bytes written and any error encountered.
|
||||
func labelPairsToText(
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName, additionalLabelValue string,
|
||||
out io.Writer,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var written int
|
||||
separator := '{'
|
||||
for _, lp := range in {
|
||||
n, err := fmt.Fprintf(
|
||||
out, `%c%s="%s"`,
|
||||
separator, lp.GetName(), escapeString(lp.GetValue(), true),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
if additionalLabelName != "" {
|
||||
n, err := fmt.Fprintf(
|
||||
out, `%c%s="%s"`,
|
||||
separator, additionalLabelName,
|
||||
escapeString(additionalLabelValue, true),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err := out.Write([]byte{'}'})
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
||||
// includeDoubleQuote is true - '"' by '\"'.
|
||||
func escapeString(v string, includeDoubleQuote bool) string {
|
||||
result := bytes.NewBuffer(make([]byte, 0, len(v)))
|
||||
for _, c := range v {
|
||||
switch {
|
||||
case c == '\\':
|
||||
result.WriteString(`\\`)
|
||||
case includeDoubleQuote && c == '"':
|
||||
result.WriteString(`\"`)
|
||||
case c == '\n':
|
||||
result.WriteString(`\n`)
|
||||
default:
|
||||
result.WriteRune(c)
|
||||
}
|
||||
}
|
||||
return result.String()
|
||||
}
|
||||
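// For reference (illustrative, not part of the vendored file), the escaping
// implemented above behaves as follows:
//
//	escapeString(`back\slash`, true)   -> `back\\slash`
//	escapeString("two\nlines", false)  -> `two\nlines`  (a literal backslash-n)
//	escapeString(`say "hi"`, true)     -> `say \"hi\"`
//	escapeString(`say "hi"`, false)    -> `say "hi"`    (quotes left untouched)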
347
Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go
generated
vendored
Normal file
@@ -0,0 +1,347 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package text
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func testCreate(t testing.TB) {
|
||||
var scenarios = []struct {
|
||||
in *dto.MetricFamily
|
||||
out string
|
||||
}{
|
||||
// 0: Counter, NaN as value, timestamp given.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("name"),
|
||||
Help: proto.String("two-line\n doc str\\ing"),
|
||||
Type: dto.MetricType_COUNTER.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("basename"),
|
||||
Value: proto.String("basevalue"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(math.NaN()),
|
||||
},
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("basename"),
|
||||
Value: proto.String("basevalue"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(.23),
|
||||
},
|
||||
TimestampMs: proto.Int64(1234567890),
|
||||
},
|
||||
},
|
||||
},
|
||||
out: `# HELP name two-line\n doc str\\ing
|
||||
# TYPE name counter
|
||||
name{labelname="val1",basename="basevalue"} NaN
|
||||
name{labelname="val2",basename="basevalue"} 0.23 1234567890
|
||||
`,
|
||||
},
|
||||
// 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("gauge_name"),
|
||||
Help: proto.String("gauge\ndoc\nstr\"ing"),
|
||||
Type: dto.MetricType_GAUGE.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_1"),
|
||||
Value: proto.String("val with\nnew line"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_2"),
|
||||
Value: proto.String("val with \\backslash and \"quotes\""),
|
||||
},
|
||||
},
|
||||
Gauge: &dto.Gauge{
|
||||
Value: proto.Float64(math.Inf(+1)),
|
||||
},
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_1"),
|
||||
Value: proto.String("Björn"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_2"),
|
||||
Value: proto.String("佖佥"),
|
||||
},
|
||||
},
|
||||
Gauge: &dto.Gauge{
|
||||
Value: proto.Float64(3.14E42),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: `# HELP gauge_name gauge\ndoc\nstr"ing
|
||||
# TYPE gauge_name gauge
|
||||
gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
|
||||
gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
|
||||
`,
|
||||
},
|
||||
// 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("untyped_name"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_1"),
|
||||
Value: proto.String("value 1"),
|
||||
},
|
||||
},
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(-1.23e-45),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: `# TYPE untyped_name untyped
|
||||
untyped_name -Inf
|
||||
untyped_name{name_1="value 1"} -1.23e-45
|
||||
`,
|
||||
},
|
||||
// 3: Summary.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("summary_name"),
|
||||
Help: proto.String("summary docstring"),
|
||||
Type: dto.MetricType_SUMMARY.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Summary: &dto.Summary{
|
||||
SampleCount: proto.Uint64(42),
|
||||
SampleSum: proto.Float64(-3.4567),
|
||||
Quantile: []*dto.Quantile{
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.5),
|
||||
Value: proto.Float64(-1.23),
|
||||
},
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.9),
|
||||
Value: proto.Float64(.2342354),
|
||||
},
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.99),
|
||||
Value: proto.Float64(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_1"),
|
||||
Value: proto.String("value 1"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("name_2"),
|
||||
Value: proto.String("value 2"),
|
||||
},
|
||||
},
|
||||
Summary: &dto.Summary{
|
||||
SampleCount: proto.Uint64(4711),
|
||||
SampleSum: proto.Float64(2010.1971),
|
||||
Quantile: []*dto.Quantile{
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.5),
|
||||
Value: proto.Float64(1),
|
||||
},
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.9),
|
||||
Value: proto.Float64(2),
|
||||
},
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.99),
|
||||
Value: proto.Float64(3),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: `# HELP summary_name summary docstring
|
||||
# TYPE summary_name summary
|
||||
summary_name{quantile="0.5"} -1.23
|
||||
summary_name{quantile="0.9"} 0.2342354
|
||||
summary_name{quantile="0.99"} 0
|
||||
summary_name_sum -3.4567
|
||||
summary_name_count 42
|
||||
summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
|
||||
summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
|
||||
summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
|
||||
summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
|
||||
summary_name_count{name_1="value 1",name_2="value 2"} 4711
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
|
||||
n, err := MetricFamilyToText(out, scenario.in)
|
||||
if err != nil {
|
||||
t.Errorf("%d. error: %s", i, err)
|
||||
continue
|
||||
}
|
||||
if expected, got := len(scenario.out), n; expected != got {
|
||||
t.Errorf(
|
||||
"%d. expected %d bytes written, got %d",
|
||||
i, expected, got,
|
||||
)
|
||||
}
|
||||
if expected, got := scenario.out, out.String(); expected != got {
|
||||
t.Errorf(
|
||||
"%d. expected out=%q, got %q",
|
||||
i, expected, got,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCreate(t *testing.T) {
|
||||
testCreate(t)
|
||||
}
|
||||
|
||||
func BenchmarkCreate(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testCreate(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testCreateError(t testing.TB) {
|
||||
var scenarios = []struct {
|
||||
in *dto.MetricFamily
|
||||
err string
|
||||
}{
|
||||
// 0: No metric.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("name"),
|
||||
Help: proto.String("doc string"),
|
||||
Type: dto.MetricType_COUNTER.Enum(),
|
||||
Metric: []*dto.Metric{},
|
||||
},
|
||||
err: "MetricFamily has no metrics",
|
||||
},
|
||||
// 1: No metric name.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Help: proto.String("doc string"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
err: "MetricFamily has no name",
|
||||
},
|
||||
// 2: No metric type.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("name"),
|
||||
Help: proto.String("doc string"),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
err: "MetricFamily has no type",
|
||||
},
|
||||
// 3: Wrong type.
|
||||
{
|
||||
in: &dto.MetricFamily{
|
||||
Name: proto.String("name"),
|
||||
Help: proto.String("doc string"),
|
||||
Type: dto.MetricType_COUNTER.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
err: "expected counter in metric",
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
var out bytes.Buffer
|
||||
_, err := MetricFamilyToText(&out, scenario.in)
|
||||
if err == nil {
|
||||
t.Errorf("%d. expected error, got nil", i)
|
||||
continue
|
||||
}
|
||||
if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
|
||||
t.Errorf(
|
||||
"%d. expected error starting with %q, got %q",
|
||||
i, expected, got,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCreateError(t *testing.T) {
|
||||
testCreateError(t)
|
||||
}
|
||||
|
||||
func BenchmarkCreateError(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testCreateError(b)
|
||||
}
|
||||
}
|
||||
659
Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go
generated
vendored
Normal file
@@ -0,0 +1,659 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package text
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
"github.com/prometheus/client_golang/model"
|
||||
)
|
||||
|
||||
// A stateFn is a function that represents a state in a state machine. By
|
||||
// executing it, the state is progressed to the next state. The stateFn returns
|
||||
// another stateFn, which represents the new state. The end state is represented
|
||||
// by nil.
|
||||
type stateFn func() stateFn
|
||||
|
||||
// ParseError signals errors while parsing the simple and flat text-based
|
||||
// exchange format.
|
||||
type ParseError struct {
|
||||
Line int
|
||||
Msg string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e ParseError) Error() string {
|
||||
return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
|
||||
}
|
||||
|
||||
// Parser is used to parse the simple and flat text-based exchange format. Its
|
||||
// zero value is ready to use.
|
||||
type Parser struct {
|
||||
metricFamiliesByName map[string]*dto.MetricFamily
|
||||
buf *bufio.Reader // Where the parsed input is read through.
|
||||
err error // Most recent error.
|
||||
lineCount int // Tracks the line count for error messages.
|
||||
currentByte byte // The most recent byte read.
|
||||
currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
|
||||
currentMF *dto.MetricFamily
|
||||
currentMetric *dto.Metric
|
||||
currentLabelPair *dto.LabelPair
|
||||
|
||||
// The remaining member variables are only used for summaries.
|
||||
summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
|
||||
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'.
|
||||
currentQuantile float64
|
||||
// These tell us if the currently processed line ends on '_count' or
|
||||
// '_sum' respectively and belong to a summary, representing the sample
|
||||
// count and sum of that summary.
|
||||
currentIsSummaryCount, currentIsSummarySum bool
|
||||
}
|
||||
|
||||
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
|
||||
// format and creates MetricFamily proto messages. It returns the MetricFamily
|
||||
// proto messages in a map where the metric names are the keys, along with any
|
||||
// error encountered.
|
||||
//
|
||||
// If the input contains duplicate metrics (i.e. lines with the same metric name
|
||||
// and exactly the same label set), the resulting MetricFamily will contain
|
||||
// duplicate Metric proto messages. The same is true for duplicate label
|
||||
// names. Checks for duplicates have to be performed separately, if required.
|
||||
//
|
||||
// Summaries are a rather special beast. You would probably not use them in the
|
||||
// simple text format anyway. This method can deal with summaries if they are
|
||||
// presented in exactly the way the text.Create function creates them.
|
||||
//
|
||||
// This method must not be called concurrently. If you want to parse different
|
||||
// input concurrently, instantiate a separate Parser for each goroutine.
|
||||
func (p *Parser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
|
||||
p.reset(in)
|
||||
for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
|
||||
// Magic happens here...
|
||||
}
|
||||
// Get rid of empty metric families.
|
||||
for k, mf := range p.metricFamiliesByName {
|
||||
if len(mf.GetMetric()) == 0 {
|
||||
delete(p.metricFamiliesByName, k)
|
||||
}
|
||||
}
|
||||
return p.metricFamiliesByName, p.err
|
||||
}
|
||||
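// exampleParseTextFormat is an illustrative sketch and not part of the
// vendored file: it parses a small, hand-written scrape and pulls out the
// single sample value. The metric name, label, and value are made up.
func exampleParseTextFormat() (float64, error) {
	input := "# TYPE http_requests_total counter\n" +
		"http_requests_total{method=\"post\"} 1027\n"
	var p Parser // the zero value is ready to use
	families, err := p.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		return 0, err
	}
	// The returned map is keyed by metric family name.
	return families["http_requests_total"].Metric[0].Counter.GetValue(), nil
}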
|
||||
func (p *Parser) reset(in io.Reader) {
|
||||
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
|
||||
if p.buf == nil {
|
||||
p.buf = bufio.NewReader(in)
|
||||
} else {
|
||||
p.buf.Reset(in)
|
||||
}
|
||||
p.err = nil
|
||||
p.lineCount = 0
|
||||
if p.summaries == nil || len(p.summaries) > 0 {
|
||||
p.summaries = map[uint64]*dto.Metric{}
|
||||
}
|
||||
p.currentQuantile = math.NaN()
|
||||
}
|
||||
|
||||
// startOfLine represents the state where the next byte read from p.buf is the
|
||||
// start of a line (or whitespace leading up to it).
|
||||
func (p *Parser) startOfLine() stateFn {
|
||||
p.lineCount++
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
// End of input reached. This is the only case where
|
||||
// that is not an error but a signal that we are done.
|
||||
p.err = nil
|
||||
return nil
|
||||
}
|
||||
switch p.currentByte {
|
||||
case '#':
|
||||
return p.startComment
|
||||
case '\n':
|
||||
return p.startOfLine // Empty line, start the next one.
|
||||
}
|
||||
return p.readingMetricName
|
||||
}
|
||||
|
||||
// startComment represents the state where the next byte read from p.buf is the
|
||||
// start of a comment (or whitespace leading up to it).
|
||||
func (p *Parser) startComment() stateFn {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte == '\n' {
|
||||
return p.startOfLine
|
||||
}
|
||||
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
// If we have hit the end of line already, there is nothing left
|
||||
// to do. This is not considered a syntax error.
|
||||
if p.currentByte == '\n' {
|
||||
return p.startOfLine
|
||||
}
|
||||
keyword := p.currentToken.String()
|
||||
if keyword != "HELP" && keyword != "TYPE" {
|
||||
// Generic comment, ignore by fast forwarding to end of line.
|
||||
for p.currentByte != '\n' {
|
||||
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
}
|
||||
return p.startOfLine
|
||||
}
|
||||
// There is something. Next has to be a metric name.
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.readTokenAsMetricName(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte == '\n' {
|
||||
// At the end of the line already.
|
||||
// Again, this is not considered a syntax error.
|
||||
return p.startOfLine
|
||||
}
|
||||
if !isBlankOrTab(p.currentByte) {
|
||||
p.parseError("invalid metric name in comment")
|
||||
return nil
|
||||
}
|
||||
p.setOrCreateCurrentMF()
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte == '\n' {
|
||||
// At the end of the line already.
|
||||
// Again, this is not considered a syntax error.
|
||||
return p.startOfLine
|
||||
}
|
||||
switch keyword {
|
||||
case "HELP":
|
||||
return p.readingHelp
|
||||
case "TYPE":
|
||||
return p.readingType
|
||||
}
|
||||
panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
|
||||
}
|
||||
|
||||
// readingMetricName represents the state where the last byte read (now in
|
||||
// p.currentByte) is the first byte of a metric name.
|
||||
func (p *Parser) readingMetricName() stateFn {
|
||||
if p.readTokenAsMetricName(); p.err != nil {
|
||||
return nil
|
||||
}
|
||||
if p.currentToken.Len() == 0 {
|
||||
p.parseError("invalid metric name")
|
||||
return nil
|
||||
}
|
||||
p.setOrCreateCurrentMF()
|
||||
// Now is the time to fix the type if it hasn't happened yet.
|
||||
if p.currentMF.Type == nil {
|
||||
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
|
||||
}
|
||||
p.currentMetric = &dto.Metric{}
|
||||
// Do not append the newly created currentMetric to
|
||||
// currentMF.Metric right now. First wait to see whether this is a summary
// and whether the metric already exists, which we can only know after
|
||||
// having read all the labels.
|
||||
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
return p.readingLabels
|
||||
}
|
||||
|
||||
// readingLabels represents the state where the last byte read (now in
|
||||
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
|
||||
// first byte of the value (otherwise).
|
||||
func (p *Parser) readingLabels() stateFn {
|
||||
// Alas, summaries are really special... We have to reset the
|
||||
// currentLabels map and the currentQuantile before starting to
|
||||
// read labels.
|
||||
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||
p.currentLabels = map[string]string{}
|
||||
p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
|
||||
p.currentQuantile = math.NaN()
|
||||
}
|
||||
if p.currentByte != '{' {
|
||||
return p.readingValue
|
||||
}
|
||||
return p.startLabelName
|
||||
}
|
||||
|
||||
// startLabelName represents the state where the next byte read from p.buf is
|
||||
// the start of a label name (or whitespace leading up to it).
|
||||
func (p *Parser) startLabelName() stateFn {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte == '}' {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
return p.readingValue
|
||||
}
|
||||
if p.readTokenAsLabelName(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentToken.Len() == 0 {
|
||||
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
|
||||
return nil
|
||||
}
|
||||
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
|
||||
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
|
||||
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
|
||||
return nil
|
||||
}
|
||||
// Once more, special summary treatment... Don't add 'quantile'
|
||||
// labels to 'real' labels.
|
||||
if p.currentMF.GetType() != dto.MetricType_SUMMARY ||
|
||||
p.currentLabelPair.GetName() != "quantile" {
|
||||
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
|
||||
}
|
||||
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte != '=' {
|
||||
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
|
||||
return nil
|
||||
}
|
||||
return p.startLabelValue
|
||||
}
|
||||
|
||||
// startLabelValue represents the state where the next byte read from p.buf is
|
||||
// the start of a (quoted) label value (or whitespace leading up to it).
|
||||
func (p *Parser) startLabelValue() stateFn {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte != '"' {
|
||||
p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
|
||||
return nil
|
||||
}
|
||||
if p.readTokenAsLabelValue(); p.err != nil {
|
||||
return nil
|
||||
}
|
||||
p.currentLabelPair.Value = proto.String(p.currentToken.String())
|
||||
// Once more, special treatment of summaries:
|
||||
// - Quantile labels are special, will result in dto.Quantile later.
|
||||
// - Other labels have to be added to currentLabels for signature calculation.
|
||||
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||
if p.currentLabelPair.GetName() == "quantile" {
|
||||
if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
|
||||
// Create a more helpful error message.
|
||||
p.parseError(fmt.Sprintf("expected float as value for quantile label, got %q", p.currentLabelPair.GetValue()))
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
|
||||
}
|
||||
}
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
switch p.currentByte {
|
||||
case ',':
|
||||
return p.startLabelName
|
||||
|
||||
case '}':
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
return p.readingValue
|
||||
default:
|
||||
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// readingValue represents the state where the last byte read (now in
|
||||
// p.currentByte) is the first byte of the sample value (i.e. a float).
|
||||
func (p *Parser) readingValue() stateFn {
|
||||
// When we are here, we have read all the labels, so for the
|
||||
// infamous special case of a summary, we can finally find out
|
||||
// if the metric already exists.
|
||||
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||
signature := model.LabelsToSignature(p.currentLabels)
|
||||
if summary := p.summaries[signature]; summary != nil {
|
||||
p.currentMetric = summary
|
||||
} else {
|
||||
p.summaries[signature] = p.currentMetric
|
||||
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
|
||||
}
|
||||
} else {
|
||||
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
|
||||
}
|
||||
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
value, err := strconv.ParseFloat(p.currentToken.String(), 64)
|
||||
if err != nil {
|
||||
// Create a more helpful error message.
|
||||
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
switch p.currentMF.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
|
||||
case dto.MetricType_GAUGE:
|
||||
p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
|
||||
case dto.MetricType_UNTYPED:
|
||||
p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
|
||||
case dto.MetricType_SUMMARY:
|
||||
// *sigh*
|
||||
if p.currentMetric.Summary == nil {
|
||||
p.currentMetric.Summary = &dto.Summary{}
|
||||
}
|
||||
switch {
|
||||
case p.currentIsSummaryCount:
|
||||
p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
|
||||
case p.currentIsSummarySum:
|
||||
p.currentMetric.Summary.SampleSum = proto.Float64(value)
|
||||
case !math.IsNaN(p.currentQuantile):
|
||||
p.currentMetric.Summary.Quantile = append(
|
||||
p.currentMetric.Summary.Quantile,
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(p.currentQuantile),
|
||||
Value: proto.Float64(value),
|
||||
},
|
||||
)
|
||||
}
|
||||
default:
|
||||
p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
|
||||
}
|
||||
if p.currentByte == '\n' {
|
||||
return p.startOfLine
|
||||
}
|
||||
return p.startTimestamp
|
||||
}
|
||||
|
||||
// startTimestamp represents the state where the next byte read from p.buf is
|
||||
// the start of the timestamp (or whitespace leading up to it).
|
||||
func (p *Parser) startTimestamp() stateFn {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
|
||||
if err != nil {
|
||||
// Create a more helpful error message.
|
||||
p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
p.currentMetric.TimestampMs = proto.Int64(timestamp)
|
||||
if p.readTokenUntilNewline(false); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentToken.Len() > 0 {
|
||||
p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
return p.startOfLine
|
||||
}
|
||||
|
||||
// readingHelp represents the state where the last byte read (now in
|
||||
// p.currentByte) is the first byte of the docstring after 'HELP'.
|
||||
func (p *Parser) readingHelp() stateFn {
|
||||
if p.currentMF.Help != nil {
|
||||
p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
|
||||
return nil
|
||||
}
|
||||
// Rest of line is the docstring.
|
||||
if p.readTokenUntilNewline(true); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
p.currentMF.Help = proto.String(p.currentToken.String())
|
||||
return p.startOfLine
|
||||
}
|
||||
|
||||
// readingType represents the state where the last byte read (now in
|
||||
// p.currentByte) is the first byte of the type hint after 'TYPE'.
|
||||
func (p *Parser) readingType() stateFn {
|
||||
if p.currentMF.Type != nil {
|
||||
p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
|
||||
return nil
|
||||
}
|
||||
// Rest of line is the type.
|
||||
if p.readTokenUntilNewline(false); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
|
||||
if !ok {
|
||||
p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
p.currentMF.Type = dto.MetricType(metricType).Enum()
|
||||
return p.startOfLine
|
||||
}
|
||||
|
||||
// parseError sets p.err to a ParseError at the current line with the given
|
||||
// message.
|
||||
func (p *Parser) parseError(msg string) {
|
||||
p.err = ParseError{
|
||||
Line: p.lineCount,
|
||||
Msg: msg,
|
||||
}
|
||||
}
|
||||
|
||||
// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
|
||||
// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
|
||||
func (p *Parser) skipBlankTab() {
|
||||
for {
|
||||
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
|
||||
// anything if p.currentByte is neither ' ' nor '\t'.
|
||||
func (p *Parser) skipBlankTabIfCurrentBlankTab() {
|
||||
if isBlankOrTab(p.currentByte) {
|
||||
p.skipBlankTab()
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
|
||||
// first byte considered is the byte already read (now in p.currentByte). The
|
||||
// first whitespace byte encountered is still copied into p.currentByte, but not
|
||||
// into p.currentToken.
|
||||
func (p *Parser) readTokenUntilWhitespace() {
|
||||
p.currentToken.Reset()
|
||||
for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
|
||||
// byte considered is the byte already read (now in p.currentByte). The first
|
||||
// newline byte encountered is still copied into p.currentByte, but not into
|
||||
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
|
||||
// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
|
||||
// other escape sequences are invalid and cause an error.
|
||||
func (p *Parser) readTokenUntilNewline(recognizeEscapeSequence bool) {
|
||||
p.currentToken.Reset()
|
||||
escaped := false
|
||||
for p.err == nil {
|
||||
if recognizeEscapeSequence && escaped {
|
||||
switch p.currentByte {
|
||||
case '\\':
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
case 'n':
|
||||
p.currentToken.WriteByte('\n')
|
||||
default:
|
||||
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
||||
return
|
||||
}
|
||||
escaped = false
|
||||
} else {
|
||||
switch p.currentByte {
|
||||
case '\n':
|
||||
return
|
||||
case '\\':
|
||||
escaped = true
|
||||
default:
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
}
|
||||
}
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
|
||||
// The first byte considered is the byte already read (now in p.currentByte).
|
||||
// The first byte not part of a metric name is still copied into p.currentByte,
|
||||
// but not into p.currentToken.
|
||||
func (p *Parser) readTokenAsMetricName() {
|
||||
p.currentToken.Reset()
|
||||
if !isValidMetricNameStart(p.currentByte) {
|
||||
return
|
||||
}
|
||||
for {
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
|
||||
// The first byte considered is the byte already read (now in p.currentByte).
|
||||
// The first byte not part of a label name is still copied into p.currentByte,
|
||||
// but not into p.currentToken.
|
||||
func (p *Parser) readTokenAsLabelName() {
|
||||
p.currentToken.Reset()
|
||||
if !isValidLabelNameStart(p.currentByte) {
|
||||
return
|
||||
}
|
||||
for {
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
|
||||
// In contrast to the other 'readTokenAs...' functions, which start with the
|
||||
// last read byte in p.currentByte, this method ignores p.currentByte and starts
|
||||
// with reading a new byte from p.buf. The first byte not part of a label value
|
||||
// is still copied into p.currentByte, but not into p.currentToken.
|
||||
func (p *Parser) readTokenAsLabelValue() {
|
||||
p.currentToken.Reset()
|
||||
escaped := false
|
||||
for {
|
||||
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
|
||||
return
|
||||
}
|
||||
if escaped {
|
||||
switch p.currentByte {
|
||||
case '"', '\\':
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
case 'n':
|
||||
p.currentToken.WriteByte('\n')
|
||||
default:
|
||||
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
||||
return
|
||||
}
|
||||
escaped = false
|
||||
continue
|
||||
}
|
||||
switch p.currentByte {
|
||||
case '"':
|
||||
return
|
||||
case '\n':
|
||||
p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
|
||||
return
|
||||
case '\\':
|
||||
escaped = true
|
||||
default:
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) setOrCreateCurrentMF() {
|
||||
p.currentIsSummaryCount = false
|
||||
p.currentIsSummarySum = false
|
||||
name := p.currentToken.String()
|
||||
if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
|
||||
return
|
||||
}
|
||||
// Check whether this is a _sum or _count for a summary.
|
||||
summaryName := summaryMetricName(name)
|
||||
if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
|
||||
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||
if isCount(name) {
|
||||
p.currentIsSummaryCount = true
|
||||
}
|
||||
if isSum(name) {
|
||||
p.currentIsSummarySum = true
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
|
||||
p.metricFamiliesByName[name] = p.currentMF
|
||||
}
|
||||
|
||||
func isValidLabelNameStart(b byte) bool {
|
||||
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
|
||||
}
|
||||
|
||||
func isValidLabelNameContinuation(b byte) bool {
|
||||
return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
|
||||
}
|
||||
|
||||
func isValidMetricNameStart(b byte) bool {
|
||||
return isValidLabelNameStart(b) || b == ':'
|
||||
}
|
||||
|
||||
func isValidMetricNameContinuation(b byte) bool {
|
||||
return isValidLabelNameContinuation(b) || b == ':'
|
||||
}
|
||||
|
||||
func isBlankOrTab(b byte) bool {
|
||||
return b == ' ' || b == '\t'
|
||||
}
|
||||
|
||||
func isCount(name string) bool {
|
||||
return len(name) > 6 && name[len(name)-6:] == "_count"
|
||||
}
|
||||
|
||||
func isSum(name string) bool {
|
||||
return len(name) > 4 && name[len(name)-4:] == "_sum"
|
||||
}
|
||||
|
||||
func summaryMetricName(name string) string {
|
||||
switch {
|
||||
case isCount(name):
|
||||
return name[:len(name)-6]
|
||||
case isSum(name):
|
||||
return name[:len(name)-4]
|
||||
default:
|
||||
return name
|
||||
}
|
||||
}
|
||||
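// For reference (illustrative, not part of the vendored file): given a family
// declared with a preceding "# TYPE rpc_duration_seconds summary" line, the
// summary handling above folds text lines that share the same (non-quantile)
// label set, such as
//
//	rpc_duration_seconds{quantile="0.5"} 0.05
//	rpc_duration_seconds{quantile="0.9"} 0.12
//	rpc_duration_seconds_sum 17.3
//	rpc_duration_seconds_count 144
//
// into a single dto.Metric of that family, with two Quantile entries plus
// SampleSum and SampleCount. The metric name and numbers are made up.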
529
Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go
generated
vendored
Normal file
@@ -0,0 +1,529 @@
|
||||
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package text

import (
	"math"
	"strings"
	"testing"

	"code.google.com/p/goprotobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

var parser Parser

func testParse(t testing.TB) {
	var scenarios = []struct {
		in  string
		out []*dto.MetricFamily
	}{
		// 0: Empty lines as input.
		{
			in: `

`,
			out: []*dto.MetricFamily{},
		},
		// 1: Minimal case.
		{
			in: `
minimal_metric 1.234
another_metric -3e3 103948
# Even that:
no_labels{} 3
# HELP line for non-existing metric will be ignored.
`,
			out: []*dto.MetricFamily{
				&dto.MetricFamily{
					Name: proto.String("minimal_metric"),
					Type: dto.MetricType_UNTYPED.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Untyped: &dto.Untyped{
								Value: proto.Float64(1.234),
							},
						},
					},
				},
				&dto.MetricFamily{
					Name: proto.String("another_metric"),
					Type: dto.MetricType_UNTYPED.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Untyped: &dto.Untyped{
								Value: proto.Float64(-3e3),
							},
							TimestampMs: proto.Int64(103948),
						},
					},
				},
				&dto.MetricFamily{
					Name: proto.String("no_labels"),
					Type: dto.MetricType_UNTYPED.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Untyped: &dto.Untyped{
								Value: proto.Float64(3),
							},
						},
					},
				},
			},
		},
		// 2: Counters & gauges, docstrings, various whitespace, escape sequences.
		{
			in: `
# A normal comment.
#
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
# HELP name two-line\n doc str\\ing

# HELP name2 doc str"ing 2
# TYPE name2 gauge
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
name2{ labelname = "val1" , }-Inf
`,
			out: []*dto.MetricFamily{
				&dto.MetricFamily{
					Name: proto.String("name"),
					Help: proto.String("two-line\n doc str\\ing"),
					Type: dto.MetricType_COUNTER.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("labelname"),
									Value: proto.String("val1"),
								},
								&dto.LabelPair{
									Name:  proto.String("basename"),
									Value: proto.String("basevalue"),
								},
							},
							Counter: &dto.Counter{
								Value: proto.Float64(math.NaN()),
							},
						},
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("labelname"),
									Value: proto.String("val2"),
								},
								&dto.LabelPair{
									Name:  proto.String("basename"),
									Value: proto.String("base\"v\\al\nue"),
								},
							},
							Counter: &dto.Counter{
								Value: proto.Float64(.23),
							},
							TimestampMs: proto.Int64(1234567890),
						},
					},
				},
				&dto.MetricFamily{
					Name: proto.String("name2"),
					Help: proto.String("doc str\"ing 2"),
					Type: dto.MetricType_GAUGE.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("labelname"),
									Value: proto.String("val2"),
								},
								&dto.LabelPair{
									Name:  proto.String("basename"),
									Value: proto.String("basevalue2"),
								},
							},
							Gauge: &dto.Gauge{
								Value: proto.Float64(math.Inf(+1)),
							},
							TimestampMs: proto.Int64(54321),
						},
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("labelname"),
									Value: proto.String("val1"),
								},
							},
							Gauge: &dto.Gauge{
								Value: proto.Float64(math.Inf(-1)),
							},
						},
					},
				},
			},
		},
		// 3: The evil summary, mixed with other types and funny comments.
		{
			in: `
# TYPE my_summary summary
my_summary{n1="val1",quantile="0.5"} 110
decoy -1 -2
my_summary{n1="val1",quantile="0.9"} 140 1
my_summary_count{n1="val1"} 42
# Latest timestamp wins in case of a summary.
my_summary_sum{n1="val1"} 4711 2
fake_sum{n1="val1"} 2001
# TYPE another_summary summary
another_summary_count{n2="val2",n1="val1"} 20
my_summary_count{n2="val2",n1="val1"} 5 5
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
my_summary_sum{n1="val2"} 08 15
my_summary{n1="val3", quantile="0.2"} 4711
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
# some
# funny comments
# HELP
# HELP
# HELP my_summary
# HELP my_summary
`,
			out: []*dto.MetricFamily{
				&dto.MetricFamily{
					Name: proto.String("fake_sum"),
					Type: dto.MetricType_UNTYPED.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("n1"),
									Value: proto.String("val1"),
								},
							},
							Untyped: &dto.Untyped{
								Value: proto.Float64(2001),
							},
						},
					},
				},
				&dto.MetricFamily{
					Name: proto.String("decoy"),
					Type: dto.MetricType_UNTYPED.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Untyped: &dto.Untyped{
								Value: proto.Float64(-1),
							},
							TimestampMs: proto.Int64(-2),
						},
					},
				},
				&dto.MetricFamily{
					Name: proto.String("my_summary"),
					Type: dto.MetricType_SUMMARY.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("n1"),
									Value: proto.String("val1"),
								},
							},
							Summary: &dto.Summary{
								SampleCount: proto.Uint64(42),
								SampleSum:   proto.Float64(4711),
								Quantile: []*dto.Quantile{
									&dto.Quantile{
										Quantile: proto.Float64(0.5),
										Value:    proto.Float64(110),
									},
									&dto.Quantile{
										Quantile: proto.Float64(0.9),
										Value:    proto.Float64(140),
									},
								},
							},
							TimestampMs: proto.Int64(2),
						},
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("n2"),
									Value: proto.String("val2"),
								},
								&dto.LabelPair{
									Name:  proto.String("n1"),
									Value: proto.String("val1"),
								},
							},
							Summary: &dto.Summary{
								SampleCount: proto.Uint64(5),
								Quantile: []*dto.Quantile{
									&dto.Quantile{
										Quantile: proto.Float64(-12.34),
										Value:    proto.Float64(math.NaN()),
									},
								},
							},
							TimestampMs: proto.Int64(5),
						},
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("n1"),
									Value: proto.String("val2"),
								},
							},
							Summary: &dto.Summary{
								SampleSum: proto.Float64(8),
							},
							TimestampMs: proto.Int64(15),
						},
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("n1"),
									Value: proto.String("val3"),
								},
							},
							Summary: &dto.Summary{
								Quantile: []*dto.Quantile{
									&dto.Quantile{
										Quantile: proto.Float64(0.2),
										Value:    proto.Float64(4711),
									},
								},
							},
						},
					},
				},
				&dto.MetricFamily{
					Name: proto.String("another_summary"),
					Type: dto.MetricType_SUMMARY.Enum(),
					Metric: []*dto.Metric{
						&dto.Metric{
							Label: []*dto.LabelPair{
								&dto.LabelPair{
									Name:  proto.String("n2"),
									Value: proto.String("val2"),
								},
								&dto.LabelPair{
									Name:  proto.String("n1"),
									Value: proto.String("val1"),
								},
							},
							Summary: &dto.Summary{
								SampleCount: proto.Uint64(20),
								Quantile: []*dto.Quantile{
									&dto.Quantile{
										Quantile: proto.Float64(0.3),
										Value:    proto.Float64(-1.2),
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for i, scenario := range scenarios {
		out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
		if err != nil {
			t.Errorf("%d. error: %s", i, err)
			continue
		}
		if expected, got := len(scenario.out), len(out); expected != got {
			t.Errorf(
				"%d. expected %d MetricFamilies, got %d",
				i, expected, got,
			)
		}
		for _, expected := range scenario.out {
			got, ok := out[expected.GetName()]
			if !ok {
				t.Errorf(
					"%d. expected MetricFamily %q, found none",
					i, expected.GetName(),
				)
				continue
			}
			if expected.String() != got.String() {
				t.Errorf(
					"%d. expected MetricFamily %s, got %s",
					i, expected, got,
				)
			}
		}
	}
}

func TestParse(t *testing.T) {
	testParse(t)
}

func BenchmarkParse(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testParse(b)
	}
}

func testParseError(t testing.TB) {
	var scenarios = []struct {
		in  string
		err string
	}{
		// 0: No new-line at end of input.
		{
			in:  `bla 3.14`,
			err: "EOF",
		},
		// 1: Invalid escape sequence in label value.
		{
			in:  `metric{label="\t"} 3.14`,
			err: "text format parsing error in line 1: invalid escape sequence",
		},
		// 2: Newline in label value.
		{
			in: `
metric{label="new
line"} 3.14
`,
			err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
		},
		// 3:
		{
			in:  `metric{@="bla"} 3.14`,
			err: "text format parsing error in line 1: invalid label name for metric",
		},
		// 4:
		{
			in:  `metric{__name__="bla"} 3.14`,
			err: `text format parsing error in line 1: label name "__name__" is reserved`,
		},
		// 5:
		{
			in:  `metric{label+="bla"} 3.14`,
			err: "text format parsing error in line 1: expected '=' after label name",
		},
		// 6:
		{
			in:  `metric{label=bla} 3.14`,
			err: "text format parsing error in line 1: expected '\"' at start of label value",
		},
		// 7:
		{
			in: `
# TYPE metric summary
metric{quantile="bla"} 3.14
`,
			err: "text format parsing error in line 3: expected float as value for quantile label",
		},
		// 8:
		{
			in:  `metric{label="bla"+} 3.14`,
			err: "text format parsing error in line 1: unexpected end of label value",
		},
		// 9:
		{
			in: `metric{label="bla"} 3.14 2.72
`,
			err: "text format parsing error in line 1: expected integer as timestamp",
		},
		// 10:
		{
			in: `metric{label="bla"} 3.14 2 3
`,
			err: "text format parsing error in line 1: spurious string after timestamp",
		},
		// 11:
		{
			in: `metric{label="bla"} blubb
`,
			err: "text format parsing error in line 1: expected float as value",
		},
		// 12:
		{
			in: `
# HELP metric one
# HELP metric two
`,
			err: "text format parsing error in line 3: second HELP line for metric name",
		},
		// 13:
		{
			in: `
# TYPE metric counter
# TYPE metric untyped
`,
			err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
		},
		// 14:
		{
			in: `
metric 4.12
# TYPE metric counter
`,
			err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
		},
		// 15:
		{
			in: `
# TYPE metric bla
`,
			err: "text format parsing error in line 2: unknown metric type",
		},
		// 16:
		{
			in: `
# TYPE met-ric
`,
			err: "text format parsing error in line 2: invalid metric name in comment",
		},
		// 17:
		{
			in:  `@invalidmetric{label="bla"} 3.14 2`,
			err: "text format parsing error in line 1: invalid metric name",
		},
		// 18:
		{
			in:  `{label="bla"} 3.14 2`,
			err: "text format parsing error in line 1: invalid metric name",
		},
	}

	for i, scenario := range scenarios {
		_, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
		if err == nil {
			t.Errorf("%d. expected error, got nil", i)
			continue
		}
		if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
			t.Errorf(
				"%d. expected error starting with %q, got %q",
				i, expected, got,
			)
		}
	}
}

func TestParseError(t *testing.T) {
	testParseError(t)
}

func BenchmarkParseError(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testParseError(b)
	}
}
43 Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go generated vendored Normal file
@@ -0,0 +1,43 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package text

import (
	"fmt"
	"io"

	"code.google.com/p/goprotobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/ext"

	dto "github.com/prometheus/client_model/go"
)

// WriteProtoDelimited writes the MetricFamily to the writer in delimited
// protobuf format and returns the number of bytes written and any error
// encountered.
func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
	return ext.WriteDelimited(w, p)
}

// WriteProtoText writes the MetricFamily to the writer in text format and
// returns the number of bytes written and any error encountered.
func WriteProtoText(w io.Writer, p *dto.MetricFamily) (int, error) {
	return fmt.Fprintf(w, "%s\n", proto.MarshalTextString(p))
}

// WriteProtoCompactText writes the MetricFamily to the writer in compact text
// format and returns the number of bytes written and any error encountered.
func WriteProtoCompactText(w io.Writer, p *dto.MetricFamily) (int, error) {
	return fmt.Fprintf(w, "%s\n", p)
}
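The three helpers above wrap goprotobuf and the delimited-encoding extension for the writer side. A minimal sketch of how they might be called, assuming the vendored import paths shown above (the example_gauge family is made up for illustration):

package main

import (
	"bytes"
	"fmt"
	"log"

	"code.google.com/p/goprotobuf/proto"
	"github.com/prometheus/client_golang/text"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// A hand-built family purely for illustration.
	mf := &dto.MetricFamily{
		Name: proto.String("example_gauge"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{
			&dto.Metric{
				Gauge: &dto.Gauge{Value: proto.Float64(42)},
			},
		},
	}

	var buf bytes.Buffer
	// Length-delimited wire format, suitable for streaming several families.
	n, err := text.WriteProtoDelimited(&buf, mf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wrote %d delimited bytes\n", n)

	buf.Reset()
	// Human-readable protobuf text format.
	if _, err := text.WriteProtoText(&buf, mf); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}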
BIN Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf generated vendored Normal file
Binary file not shown.
BIN Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf.gz generated vendored Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.