Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 19:58:17 +00:00)

Level sets dependency graph to consume etcd 3.1.5

186 vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go (generated, vendored)

@@ -32,7 +32,9 @@ var _ = math.Inf
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type Permission_Type int32
 
@@ -99,113 +101,113 @@ func init() {
 	proto.RegisterType((*Role)(nil), "authpb.Role")
 	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
 }
-func (m *User) Marshal() (data []byte, err error) {
+func (m *User) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *User) MarshalTo(data []byte) (int, error) {
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Name) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Name)))
-		i += copy(data[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
 	}
 	if len(m.Password) > 0 {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Password)))
-		i += copy(data[i:], m.Password)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+		i += copy(dAtA[i:], m.Password)
 	}
 	if len(m.Roles) > 0 {
 		for _, s := range m.Roles {
-			data[i] = 0x1a
+			dAtA[i] = 0x1a
 			i++
 			l = len(s)
 			for l >= 1<<7 {
-				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
 				l >>= 7
 				i++
 			}
-			data[i] = uint8(l)
+			dAtA[i] = uint8(l)
 			i++
-			i += copy(data[i:], s)
+			i += copy(dAtA[i:], s)
 		}
 	}
 	return i, nil
 }
 
-func (m *Permission) Marshal() (data []byte, err error) {
+func (m *Permission) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *Permission) MarshalTo(data []byte) (int, error) {
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.PermType != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintAuth(data, i, uint64(m.PermType))
+		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
 	}
 	if len(m.Key) > 0 {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Key)))
-		i += copy(data[i:], m.Key)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
 	}
 	if len(m.RangeEnd) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
-		i += copy(data[i:], m.RangeEnd)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+		i += copy(dAtA[i:], m.RangeEnd)
 	}
 	return i, nil
 }
 
-func (m *Role) Marshal() (data []byte, err error) {
+func (m *Role) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *Role) MarshalTo(data []byte) (int, error) {
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Name) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Name)))
-		i += copy(data[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
 	}
 	if len(m.KeyPermission) > 0 {
 		for _, msg := range m.KeyPermission {
-			data[i] = 0x12
+			dAtA[i] = 0x12
 			i++
-			i = encodeVarintAuth(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}
@@ -215,31 +217,31 @@ func (m *Role) MarshalTo(data []byte) (int, error) {
 	return i, nil
 }
 
-func encodeFixed64Auth(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 }
-func encodeFixed32Auth(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 }
-func encodeVarintAuth(data []byte, offset int, v uint64) int {
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 }
 func (m *User) Size() (n int) {
@@ -308,8 +310,8 @@ func sovAuth(x uint64) (n int) {
 func sozAuth(x uint64) (n int) {
 	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
-func (m *User) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *User) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -321,7 +323,7 @@ func (m *User) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -349,7 +351,7 @@ func (m *User) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -363,7 +365,7 @@ func (m *User) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
 			if m.Name == nil {
 				m.Name = []byte{}
 			}
@@ -380,7 +382,7 @@ func (m *User) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -394,7 +396,7 @@ func (m *User) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
+			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
 			if m.Password == nil {
 				m.Password = []byte{}
 			}
@@ -411,7 +413,7 @@ func (m *User) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -426,11 +428,11 @@ func (m *User) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -449,8 +451,8 @@ func (m *User) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func (m *Permission) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Permission) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -462,7 +464,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -490,7 +492,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.PermType |= (Permission_Type(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -509,7 +511,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -523,7 +525,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
 			if m.Key == nil {
 				m.Key = []byte{}
 			}
@@ -540,7 +542,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -554,14 +556,14 @@ func (m *Permission) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
 			if m.RangeEnd == nil {
 				m.RangeEnd = []byte{}
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -580,8 +582,8 @@ func (m *Permission) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func (m *Role) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Role) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -593,7 +595,7 @@ func (m *Role) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -621,7 +623,7 @@ func (m *Role) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -635,7 +637,7 @@ func (m *Role) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
 			if m.Name == nil {
 				m.Name = []byte{}
 			}
@@ -652,7 +654,7 @@ func (m *Role) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -667,13 +669,13 @@ func (m *Role) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 			}
 			m.KeyPermission = append(m.KeyPermission, &Permission{})
-			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -692,8 +694,8 @@ func (m *Role) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func skipAuth(data []byte) (n int, err error) {
-	l := len(data)
+func skipAuth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		var wire uint64
@@ -704,7 +706,7 @@ func skipAuth(data []byte) (n int, err error) {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -722,7 +724,7 @@ func skipAuth(data []byte) (n int, err error) {
 				return 0, io.ErrUnexpectedEOF
 			}
 			iNdEx++
-			if data[iNdEx-1] < 0x80 {
+			if dAtA[iNdEx-1] < 0x80 {
 				break
 			}
 		}
@@ -739,7 +741,7 @@ func skipAuth(data []byte) (n int, err error) {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			length |= (int(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -762,7 +764,7 @@ func skipAuth(data []byte) (n int, err error) {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			innerWire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -773,7 +775,7 @@ func skipAuth(data []byte) (n int, err error) {
 			if innerWireType == 4 {
 				break
 			}
-			next, err := skipAuth(data[start:])
+			next, err := skipAuth(dAtA[start:])
 			if err != nil {
 				return 0, err
 			}
@@ -797,6 +799,8 @@ var (
 	ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
+
 var fileDescriptorAuth = []byte{
 	// 288 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
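
Almost all of this file is a mechanical rename of the generated marshal buffer from data to dAtA (gogo/protobuf's collision-resistant parameter name) plus the ProtoPackageIsVersion1 to ProtoPackageIsVersion2 assertion bump. The varint helper that keeps reappearing above is small enough to demonstrate standalone; this is a minimal sketch of the same encoding, not part of the vendored file:

package main

import "fmt"

// encodeVarint follows the same scheme as the generated encodeVarintAuth
// above: emit 7 bits per byte, least-significant group first, with the
// high bit set on every byte except the last.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // prints "ac 02": 300 = 0b10_0101100
}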

49 vendor/github.com/coreos/etcd/auth/range_perm_cache.go (generated, vendored)

@@ -49,46 +49,37 @@ func isRangeEqual(a, b *rangePerm) bool {
 
 // removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
 // If there are equal ranges, removeSubsetRangePerms only keeps one of them.
-func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
-	// TODO(mitake): currently it is O(n^2), we need a better algorithm
-	newp := make([]*rangePerm, 0)
-
+// It returns a sorted rangePerm slice.
+func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
+	sort.Sort(RangePermSliceByBegin(perms))
+	var prev *rangePerm
 	for i := range perms {
-		skip := false
-
-		for j := range perms {
-			if i == j {
-				continue
-			}
-
-			if isRangeEqual(perms[i], perms[j]) {
-				// if ranges are equal, we only keep the first range.
-				if i > j {
-					skip = true
-					break
-				}
-			} else if isSubset(perms[i], perms[j]) {
-				// if a range is a strict subset of the other one, we skip the subset.
-				skip = true
-				break
-			}
-		}
-
-		if skip {
+		if i == 0 {
+			prev = perms[i]
+			newp = append(newp, perms[i])
 			continue
 		}
+
+		if isRangeEqual(perms[i], prev) {
+			continue
+		}
+		if isSubset(perms[i], prev) {
+			continue
+		}
+		if isSubset(prev, perms[i]) {
+			prev = perms[i]
+			newp[len(newp)-1] = perms[i]
+			continue
+		}
+		prev = perms[i]
 		newp = append(newp, perms[i])
 	}
 
 	return newp
 }
 
 // mergeRangePerms merges adjacent rangePerms.
 func mergeRangePerms(perms []*rangePerm) []*rangePerm {
-	merged := make([]*rangePerm, 0)
+	var merged []*rangePerm
 	perms = removeSubsetRangePerms(perms)
-	sort.Sort(RangePermSliceByBegin(perms))
 
 	i := 0
 	for i < len(perms) {
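
The rewrite above replaces the O(n²) pairwise subset scan with a single pass over ranges sorted by begin key: after sorting, every subset is adjacent to (or swallowed by) the most recently kept range. A self-contained sketch of the same idea on integer intervals; the names here are illustrative, not etcd's:

package main

import (
	"fmt"
	"sort"
)

type interval struct{ begin, end int }

// subset reports whether a lies entirely inside b, mirroring the
// prev/perms[i] containment checks in removeSubsetRangePerms above.
func subset(a, b interval) bool { return b.begin <= a.begin && a.end <= b.end }

// removeSubsets keeps only intervals not contained in another, in one
// pass after sorting by begin, comparing each interval against the last
// survivor exactly as the vendored code compares against prev.
func removeSubsets(xs []interval) (out []interval) {
	sort.Slice(xs, func(i, j int) bool { return xs[i].begin < xs[j].begin })
	for i, x := range xs {
		if i == 0 {
			out = append(out, x)
			continue
		}
		prev := out[len(out)-1]
		switch {
		case subset(x, prev): // covered by what we already kept (or equal)
			continue
		case subset(prev, x): // x swallows the previous survivor
			out[len(out)-1] = x
		default:
			out = append(out, x)
		}
	}
	return out
}

func main() {
	fmt.Println(removeSubsets([]interval{{1, 10}, {2, 3}, {1, 10}, {5, 12}}))
	// [{1 10} {5 12}]
}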

89 vendor/github.com/coreos/etcd/auth/simple_token.go (generated, vendored)

@@ -20,6 +20,9 @@ package auth
 import (
 	"crypto/rand"
 	"math/big"
+	"strings"
+	"sync"
+	"time"
 )
 
 const (
@@ -27,6 +30,73 @@ const (
 	defaultSimpleTokenLength = 16
 )
 
+// var for testing purposes
+var (
+	simpleTokenTTL           = 5 * time.Minute
+	simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+	tokensMu        sync.Mutex
+	tokens          map[string]time.Time
+	stopCh          chan chan struct{}
+	deleteTokenFunc func(string)
+}
+
+func NewSimpleTokenTTLKeeper(deletefunc func(string)) *simpleTokenTTLKeeper {
+	stk := &simpleTokenTTLKeeper{
+		tokens:          make(map[string]time.Time),
+		stopCh:          make(chan chan struct{}),
+		deleteTokenFunc: deletefunc,
+	}
+	go stk.run()
+	return stk
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+	waitCh := make(chan struct{})
+	tm.stopCh <- waitCh
+	<-waitCh
+	close(tm.stopCh)
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+	if _, ok := tm.tokens[token]; ok {
+		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	}
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+	delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+	tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+	defer tokenTicker.Stop()
+	for {
+		select {
+		case <-tokenTicker.C:
+			nowtime := time.Now()
+			tm.tokensMu.Lock()
+			for t, tokenendtime := range tm.tokens {
+				if nowtime.After(tokenendtime) {
+					tm.deleteTokenFunc(t)
+					delete(tm.tokens, t)
+				}
+			}
+			tm.tokensMu.Unlock()
+		case waitCh := <-tm.stopCh:
+			tm.tokens = make(map[string]time.Time)
+			waitCh <- struct{}{}
+			return
+		}
+	}
+}
+
 func (as *authStore) GenSimpleToken() (string, error) {
 	ret := make([]byte, defaultSimpleTokenLength)
 
@@ -43,6 +113,7 @@ func (as *authStore) GenSimpleToken() (string, error) {
 }
 
 func (as *authStore) assignSimpleTokenToUser(username, token string) {
+	as.simpleTokenKeeper.tokensMu.Lock()
 	as.simpleTokensMu.Lock()
 
 	_, ok := as.simpleTokens[token]
@@ -51,5 +122,23 @@ func (as *authStore) assignSimpleTokenToUser(username, token string) {
 	}
 
 	as.simpleTokens[token] = username
+	as.simpleTokenKeeper.addSimpleToken(token)
 	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
+}
+
+func (as *authStore) invalidateUser(username string) {
+	if as.simpleTokenKeeper == nil {
+		return
+	}
+	as.simpleTokenKeeper.tokensMu.Lock()
+	as.simpleTokensMu.Lock()
+	for token, name := range as.simpleTokens {
+		if strings.Compare(name, username) == 0 {
+			delete(as.simpleTokens, token)
+			as.simpleTokenKeeper.deleteSimpleToken(token)
+		}
+	}
+	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
 }
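
The new simpleTokenTTLKeeper gives simple auth tokens a sliding expiry: addSimpleToken stamps now + simpleTokenTTL, resetSimpleToken renews the stamp on each use, and the run goroutine sweeps the map once per simpleTokenTTLResolution tick. A stripped-down sketch of the same sweep pattern, with hypothetical names rather than the vendored type:

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlMap is a toy version of simpleTokenTTLKeeper: a mutex-guarded
// deadline map plus a ticker goroutine that evicts expired entries.
type ttlMap struct {
	mu        sync.Mutex
	deadlines map[string]time.Time
	ttl       time.Duration
}

func newTTLMap(ttl, resolution time.Duration) *ttlMap {
	m := &ttlMap{deadlines: map[string]time.Time{}, ttl: ttl}
	go func() {
		for range time.Tick(resolution) {
			m.mu.Lock()
			now := time.Now()
			for k, dl := range m.deadlines {
				if now.After(dl) {
					delete(m.deadlines, k)
				}
			}
			m.mu.Unlock()
		}
	}()
	return m
}

// touch adds or renews a token, like addSimpleToken/resetSimpleToken.
func (m *ttlMap) touch(tok string) {
	m.mu.Lock()
	m.deadlines[tok] = time.Now().Add(m.ttl)
	m.mu.Unlock()
}

func (m *ttlMap) alive(tok string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, ok := m.deadlines[tok]
	return ok
}

func main() {
	m := newTTLMap(50*time.Millisecond, 10*time.Millisecond)
	m.touch("abcd.42") // tokens look like "<random>.<raft index>" per Authenticate below
	time.Sleep(100 * time.Millisecond)
	fmt.Println(m.alive("abcd.42")) // false: swept after the TTL lapsed
}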

341 vendor/github.com/coreos/etcd/auth/store.go (generated, vendored)

@@ -16,9 +16,11 @@ package auth
 
 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 
@@ -28,6 +30,7 @@ import (
 	"github.com/coreos/pkg/capnslog"
 	"golang.org/x/crypto/bcrypt"
 	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
 )
 
 var (
@@ -35,6 +38,8 @@ var (
 	authEnabled  = []byte{1}
 	authDisabled = []byte{0}
 
+	revisionKey = []byte("authRevision")
+
 	authBucketName      = []byte("auth")
 	authUsersBucketName = []byte("authUsers")
 	authRolesBucketName = []byte("authRoles")
@@ -44,6 +49,7 @@ var (
 	ErrRootUserNotExist     = errors.New("auth: root user does not exist")
 	ErrRootRoleNotExist     = errors.New("auth: root user does not have root role")
 	ErrUserAlreadyExist     = errors.New("auth: user already exists")
+	ErrUserEmpty            = errors.New("auth: user name is empty")
 	ErrUserNotFound         = errors.New("auth: user not found")
 	ErrRoleAlreadyExist     = errors.New("auth: role already exists")
 	ErrRoleNotFound         = errors.New("auth: role not found")
@@ -51,13 +57,26 @@ var (
 	ErrPermissionDenied     = errors.New("auth: permission denied")
 	ErrRoleNotGranted       = errors.New("auth: role is not granted to the user")
 	ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+	ErrAuthNotEnabled       = errors.New("auth: authentication is not enabled")
+	ErrAuthOldRevision      = errors.New("auth: revision in header is old")
+	ErrInvalidAuthToken     = errors.New("auth: invalid auth token")
+
+	// BcryptCost is the algorithm cost / strength for hashing auth passwords
+	BcryptCost = bcrypt.DefaultCost
 )
 
 const (
 	rootUser = "root"
 	rootRole = "root"
+
+	revBytesLen = 8
 )
 
+type AuthInfo struct {
+	Username string
+	Revision uint64
+}
+
 type AuthStore interface {
 	// AuthEnable turns on the authentication feature
 	AuthEnable() error
@@ -110,23 +129,36 @@ type AuthStore interface {
 	// RoleList gets a list of all roles
 	RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
 
-	// UsernameFromToken gets a username from the given Token
-	UsernameFromToken(token string) (string, bool)
+	// AuthInfoFromToken gets a username from the given Token and current revision number
+	// (The revision number is used for preventing the TOCTOU problem)
+	AuthInfoFromToken(token string) (*AuthInfo, bool)
 
 	// IsPutPermitted checks put permission of the user
-	IsPutPermitted(username string, key []byte) bool
+	IsPutPermitted(authInfo *AuthInfo, key []byte) error
 
 	// IsRangePermitted checks range permission of the user
-	IsRangePermitted(username string, key, rangeEnd []byte) bool
+	IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
 
 	// IsDeleteRangePermitted checks delete-range permission of the user
-	IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
+	IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
 
 	// IsAdminPermitted checks admin permission of the user
-	IsAdminPermitted(username string) bool
+	IsAdminPermitted(authInfo *AuthInfo) error
 
 	// GenSimpleToken produces a simple random string
 	GenSimpleToken() (string, error)
+
+	// Revision gets current revision of authStore
+	Revision() uint64
+
+	// CheckPassword checks a given pair of username and password is correct
+	CheckPassword(username, password string) (uint64, error)
+
+	// Close does cleanup of AuthStore
+	Close() error
+
+	// AuthInfoFromCtx gets AuthInfo from gRPC's context
+	AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
 }
 
@@ -136,11 +168,33 @@ type authStore struct {
 
 	rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
 
-	simpleTokensMu sync.RWMutex
-	simpleTokens   map[string]string // token -> username
+	revision uint64
+
+	// tokenSimple in v3.2+
+	indexWaiter       func(uint64) <-chan struct{}
+	simpleTokenKeeper *simpleTokenTTLKeeper
+	simpleTokensMu    sync.Mutex
+	simpleTokens      map[string]string // token -> username
 }
 
+func newDeleterFunc(as *authStore) func(string) {
+	return func(t string) {
+		as.simpleTokensMu.Lock()
+		defer as.simpleTokensMu.Unlock()
+		if username, ok := as.simpleTokens[t]; ok {
+			plog.Infof("deleting token %s for user %s", t, username)
+			delete(as.simpleTokens, t)
+		}
+	}
+}
+
 func (as *authStore) AuthEnable() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if as.enabled {
+		plog.Noticef("Authentication already enabled")
+		return nil
+	}
 	b := as.be
 	tx := b.BatchTx()
 	tx.Lock()
@@ -160,33 +214,64 @@ func (as *authStore) AuthEnable() error {
 
 	tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
 
-	as.enabledMu.Lock()
 	as.enabled = true
-	as.enabledMu.Unlock()
+
+	as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
 
 	as.rangePermCache = make(map[string]*unifiedRangePermissions)
 
+	as.revision = getRevision(tx)
+
 	plog.Noticef("Authentication enabled")
 
 	return nil
 }
 
 func (as *authStore) AuthDisable() {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return
+	}
 	b := as.be
 	tx := b.BatchTx()
 	tx.Lock()
 	tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+	as.commitRevision(tx)
 	tx.Unlock()
 	b.ForceCommit()
 
-	as.enabledMu.Lock()
 	as.enabled = false
-	as.enabledMu.Unlock()
+
+	as.simpleTokensMu.Lock()
+	as.simpleTokens = make(map[string]string) // invalidate all tokens
+	as.simpleTokensMu.Unlock()
+	if as.simpleTokenKeeper != nil {
+		as.simpleTokenKeeper.stop()
+		as.simpleTokenKeeper = nil
+	}
 
 	plog.Noticef("Authentication disabled")
 }
 
+func (as *authStore) Close() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return nil
+	}
+	if as.simpleTokenKeeper != nil {
+		as.simpleTokenKeeper.stop()
+		as.simpleTokenKeeper = nil
+	}
+	return nil
+}
+
 func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+	if !as.isAuthEnabled() {
+		return nil, ErrAuthNotEnabled
+	}
+
+	// TODO(mitake): after adding jwt support, branching based on values of ctx is required
+	index := ctx.Value("index").(uint64)
+	simpleToken := ctx.Value("simpleToken").(string)
@@ -200,11 +285,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
 		return nil, ErrAuthFailed
 	}
 
-	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
-		plog.Noticef("authentication failed, invalid password for user %s", username)
-		return &pb.AuthenticateResponse{}, ErrAuthFailed
-	}
-
 	token := fmt.Sprintf("%s.%d", simpleToken, index)
 	as.assignSimpleTokenToUser(username, token)
 
@@ -212,6 +292,24 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
 	return &pb.AuthenticateResponse{Token: token}, nil
 }
 
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, username)
+	if user == nil {
+		return 0, ErrAuthFailed
+	}
+
+	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
+		plog.Noticef("authentication failed, invalid password for user %s", username)
+		return 0, ErrAuthFailed
+	}
+
+	return getRevision(tx), nil
+}
+
 func (as *authStore) Recover(be backend.Backend) {
 	enabled := false
 	as.be = be
@@ -223,6 +321,9 @@ func (as *authStore) Recover(be backend.Backend) {
 			enabled = true
 		}
 	}
+
+	as.revision = getRevision(tx)
+
 	tx.Unlock()
 
 	as.enabledMu.Lock()
@@ -231,7 +332,11 @@ func (as *authStore) Recover(be backend.Backend) {
 }
 
 func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
-	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
+	if len(r.Name) == 0 {
+		return nil, ErrUserEmpty
+	}
+
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
 	if err != nil {
 		plog.Errorf("failed to hash password: %s", err)
 		return nil, err
@@ -253,6 +358,8 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
 
 	putUser(tx, newUser)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("added a new user: %s", r.Name)
 
 	return &pb.AuthUserAddResponse{}, nil
@@ -270,6 +377,11 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
 
 	delUser(tx, r.Name)
 
+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.invalidateUser(r.Name)
+
 	plog.Noticef("deleted a user: %s", r.Name)
 
 	return &pb.AuthUserDeleteResponse{}, nil
@@ -278,7 +390,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
 func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
 	// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
 	// If the cost is too high, we should move the encryption to outside of the raft
-	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
 	if err != nil {
 		plog.Errorf("failed to hash password: %s", err)
 		return nil, err
@@ -301,6 +413,11 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
 
 	putUser(tx, updatedUser)
 
+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.invalidateUser(r.Name)
+
 	plog.Noticef("changed a password of a user: %s", r.Name)
 
 	return &pb.AuthUserChangePasswordResponse{}, nil
@@ -336,6 +453,8 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
 
 	as.invalidateCachedPerm(r.User)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("granted role %s to user %s", r.Role, r.User)
 	return &pb.AuthUserGrantRoleResponse{}, nil
 }
@@ -351,11 +470,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
 	if user == nil {
 		return nil, ErrUserNotFound
 	}
 
-	for _, role := range user.Roles {
-		resp.Roles = append(resp.Roles, role)
-	}
-
+	resp.Roles = append(resp.Roles, user.Roles...)
 	return &resp, nil
 }
 
@@ -404,6 +519,8 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
 
 	as.invalidateCachedPerm(r.Name)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
 	return &pb.AuthUserRevokeRoleResponse{}, nil
 }
@@ -419,11 +536,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
 	if role == nil {
 		return nil, ErrRoleNotFound
 	}
 
-	for _, perm := range role.KeyPermission {
-		resp.Perm = append(resp.Perm, perm)
-	}
-
+	resp.Perm = append(resp.Perm, role.KeyPermission...)
 	return &resp, nil
 }
 
@@ -473,6 +586,8 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
 	// It should be optimized.
 	as.clearCachedPerm()
 
+	as.commitRevision(tx)
+
 	plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
 	return &pb.AuthRoleRevokePermissionResponse{}, nil
 }
@@ -501,6 +616,8 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
 
 	delRole(tx, r.Role)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("deleted role %s", r.Role)
 	return &pb.AuthRoleDeleteResponse{}, nil
 }
@@ -521,16 +638,24 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
 
 	putRole(tx, newRole)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("Role %s is created", r.Name)
 
 	return &pb.AuthRoleAddResponse{}, nil
 }
 
-func (as *authStore) UsernameFromToken(token string) (string, bool) {
-	as.simpleTokensMu.RLock()
-	defer as.simpleTokensMu.RUnlock()
-	t, ok := as.simpleTokens[token]
-	return t, ok
+func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
+	// same as '(t *tokenSimple) info' in v3.2+
+	as.simpleTokenKeeper.tokensMu.Lock()
+	as.simpleTokensMu.Lock()
+	username, ok := as.simpleTokens[token]
+	if ok {
+		as.simpleTokenKeeper.resetSimpleToken(token)
+	}
+	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
+	return &AuthInfo{Username: username, Revision: as.revision}, ok
 }
 
 type permSlice []*authpb.Permission
@@ -582,15 +707,26 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
 	// It should be optimized.
 	as.clearCachedPerm()
 
+	as.commitRevision(tx)
+
 	plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
 
 	return &pb.AuthRoleGrantPermissionResponse{}, nil
 }
 
-func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
 	// TODO(mitake): this function would be costly so we need a caching mechanism
 	if !as.isAuthEnabled() {
-		return true
+		return nil
 	}
 
+	// only gets rev == 0 when passed AuthInfo{}; no user given
+	if revision == 0 {
+		return ErrUserEmpty
+	}
+
+	if revision < as.revision {
+		return ErrAuthOldRevision
+	}
+
 	tx := as.be.BatchTx()
@@ -600,48 +736,55 @@ func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTy
 	user := getUser(tx, userName)
 	if user == nil {
 		plog.Errorf("invalid user name %s for permission checking", userName)
-		return false
+		return ErrPermissionDenied
 	}
 
 	// root role should have permission on all ranges
 	if hasRootRole(user) {
-		return true
+		return nil
 	}
 
 	if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
-		return true
+		return nil
 	}
 
-	return false
+	return ErrPermissionDenied
 }
 
-func (as *authStore) IsPutPermitted(username string, key []byte) bool {
-	return as.isOpPermitted(username, key, nil, authpb.WRITE)
+func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
 }
 
-func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
-	return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
+func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
 }
 
-func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
-	return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
+func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
 }
 
-func (as *authStore) IsAdminPermitted(username string) bool {
+func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
 	if !as.isAuthEnabled() {
-		return true
+		return nil
 	}
+	if authInfo == nil {
+		return ErrUserEmpty
+	}
 
 	tx := as.be.BatchTx()
 	tx.Lock()
 	defer tx.Unlock()
 
-	u := getUser(tx, username)
+	u := getUser(tx, authInfo.Username)
 	if u == nil {
-		return false
+		return ErrUserNotFound
 	}
 
-	return hasRootRole(u)
+	if !hasRootRole(u) {
+		return ErrPermissionDenied
+	}
+
+	return nil
 }
 
 func getUser(tx backend.BatchTx, username string) *authpb.User {
@@ -745,7 +888,7 @@ func (as *authStore) isAuthEnabled() bool {
 	return as.enabled
 }
 
-func NewAuthStore(be backend.Backend) *authStore {
+func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
 	tx := be.BatchTx()
 	tx.Lock()
 
@@ -753,13 +896,35 @@ func NewAuthStore(be backend.Backend) *authStore {
 	tx.UnsafeCreateBucket(authUsersBucketName)
 	tx.UnsafeCreateBucket(authRolesBucketName)
 
+	enabled := false
+	_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+	if len(vs) == 1 {
+		if bytes.Equal(vs[0], authEnabled) {
+			enabled = true
+		}
+	}
+
+	as := &authStore{
+		be:             be,
+		simpleTokens:   make(map[string]string),
+		revision:       getRevision(tx),
+		indexWaiter:    indexWaiter,
+		enabled:        enabled,
+		rangePermCache: make(map[string]*unifiedRangePermissions),
+	}
+
+	if enabled {
+		as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
+	}
+
+	if as.revision == 0 {
+		as.commitRevision(tx)
+	}
+
 	tx.Unlock()
 	be.ForceCommit()
 
-	return &authStore{
-		be:           be,
-		simpleTokens: make(map[string]string),
-	}
+	return as
 }
 
 func hasRootRole(u *authpb.User) bool {
@@ -770,3 +935,67 @@ func hasRootRole(u *authpb.User) bool {
 	}
 	return false
 }
+
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+	as.revision++
+	revBytes := make([]byte, revBytesLen)
+	binary.BigEndian.PutUint64(revBytes, as.revision)
+	tx.UnsafePut(authBucketName, revisionKey, revBytes)
+}
+
+func getRevision(tx backend.BatchTx) uint64 {
+	_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
+	if len(vs) != 1 {
+		// this can happen in the initialization phase
+		return 0
+	}
+
+	return binary.BigEndian.Uint64(vs[0])
+}
+
+func (as *authStore) Revision() uint64 {
+	return as.revision
+}
+
+func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
+	splitted := strings.Split(token, ".")
+	if len(splitted) != 2 {
+		return false
+	}
+	index, err := strconv.Atoi(splitted[1])
+	if err != nil {
+		return false
+	}
+
+	select {
+	case <-as.indexWaiter(uint64(index)):
+		return true
+	case <-ctx.Done():
+	}
+
+	return false
+}
+
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+	md, ok := metadata.FromContext(ctx)
+	if !ok {
+		return nil, nil
+	}
+
+	ts, tok := md["token"]
+	if !tok {
+		return nil, nil
+	}
+
+	token := ts[0]
+	if !as.isValidSimpleToken(token, ctx) {
+		return nil, ErrInvalidAuthToken
+	}
+
+	authInfo, uok := as.AuthInfoFromToken(token)
+	if !uok {
+		plog.Warningf("invalid auth token: %s", token)
+		return nil, ErrInvalidAuthToken
+	}
+	return authInfo, nil
+}
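
The largest behavioral change in this file is the permission-check API: the Is*Permitted methods now take an *AuthInfo carrying the auth-store revision observed at token-validation time and return a typed error instead of a bool, so a stale revision surfaces as ErrAuthOldRevision instead of a silent deny. This is the TOCTOU guard the AuthInfoFromToken comment refers to. A minimal runnable model of just that revision check (toy names, not the vendored types):

package main

import (
	"errors"
	"fmt"
)

var (
	errUserEmpty   = errors.New("auth: user name is empty")
	errOldRevision = errors.New("auth: revision in header is old")
)

// authInfo mirrors the new AuthInfo: which user, and which auth-store
// revision that fact was established against.
type authInfo struct {
	username string
	revision uint64
}

// checkRevision models the guard at the top of isOpPermitted: a zero
// revision means no user was resolved, and a revision older than the
// store's current one means users/roles may have changed between token
// validation and this check (the TOCTOU window).
func checkRevision(ai authInfo, storeRev uint64) error {
	if ai.revision == 0 {
		return errUserEmpty
	}
	if ai.revision < storeRev {
		return errOldRevision
	}
	return nil
}

func main() {
	fmt.Println(checkRevision(authInfo{"alice", 7}, 7)) // <nil>
	fmt.Println(checkRevision(authInfo{"alice", 6}, 7)) // revision in header is old
	fmt.Println(checkRevision(authInfo{}, 7))           // user name is empty
}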

2 vendor/github.com/coreos/etcd/client/README.md (generated, vendored)

@@ -114,4 +114,4 @@
 
 3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
 
-4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265).
+4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.

147 vendor/github.com/coreos/etcd/client/client.go (generated, vendored)

@@ -22,7 +22,6 @@ import (
 	"net"
 	"net/http"
 	"net/url"
-	"reflect"
 	"sort"
 	"strconv"
 	"sync"
@@ -261,53 +260,67 @@ type httpClusterClient struct {
 	selectionMode EndpointSelectionMode
 }
 
-func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
-	mAPI := NewMembersAPI(c)
-	leader, err := mAPI.Leader(context.Background())
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+	ceps := make([]url.URL, len(eps))
+	copy(ceps, eps)
+
+	// To perform a lookup on the new endpoint list without using the current
+	// client, we'll copy it
+	clientCopy := &httpClusterClient{
+		clientFactory: c.clientFactory,
+		credentials:   c.credentials,
+		rand:          c.rand,
+
+		pinned:    0,
+		endpoints: ceps,
+	}
+
+	mAPI := NewMembersAPI(clientCopy)
+	leader, err := mAPI.Leader(ctx)
 	if err != nil {
 		return "", err
 	}
+	if len(leader.ClientURLs) == 0 {
+		return "", ErrNoLeaderEndpoint
+	}
 
 	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
 }
 
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
 	if len(eps) == 0 {
-		return ErrNoEndpoints
+		return []url.URL{}, ErrNoEndpoints
 	}
 
 	neps := make([]url.URL, len(eps))
 	for i, ep := range eps {
 		u, err := url.Parse(ep)
 		if err != nil {
-			return err
+			return []url.URL{}, err
 		}
 		neps[i] = *u
 	}
+	return neps, nil
+}
 
-	switch c.selectionMode {
-	case EndpointSelectionRandom:
-		c.endpoints = shuffleEndpoints(c.rand, neps)
-		c.pinned = 0
-	case EndpointSelectionPrioritizeLeader:
-		c.endpoints = neps
-		lep, err := c.getLeaderEndpoint()
-		if err != nil {
-			return ErrNoLeaderEndpoint
-		}
-
-		for i := range c.endpoints {
-			if c.endpoints[i].String() == lep {
-				c.pinned = i
-				break
-			}
-		}
-		// If endpoints doesn't have the lu, just keep c.pinned = 0.
-		// Forwarding between follower and leader would be required but it works.
-	default:
-		return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
 	}
 
+	c.Lock()
+	defer c.Unlock()
+
+	c.endpoints = shuffleEndpoints(c.rand, neps)
+	// We're not doing anything for PrioritizeLeader here. This is
+	// due to not having a context meaning we can't call getLeaderEndpoint
+	// However, if you're using PrioritizeLeader, you've already been told
+	// to regularly call sync, where we do have a ctx, and can figure the
+	// leader. PrioritizeLeader is also quite a loose guarantee, so deal
+	// with it
+	c.pinned = 0
+
 	return nil
 }
 
@@ -401,27 +414,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
 		return err
 	}
 
-	c.Lock()
-	defer c.Unlock()
-
-	eps := make([]string, 0)
+	var eps []string
 	for _, m := range ms {
 		eps = append(eps, m.ClientURLs...)
 	}
-	sort.Sort(sort.StringSlice(eps))
 
-	ceps := make([]string, len(c.endpoints))
-	for i, cep := range c.endpoints {
-		ceps[i] = cep.String()
-	}
-	sort.Sort(sort.StringSlice(ceps))
-	// fast path if no change happens
-	// this helps client to pin the endpoint when no cluster change
-	if reflect.DeepEqual(eps, ceps) {
-		return nil
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
 	}
 
-	return c.SetEndpoints(eps)
+	npin := 0
+
+	switch c.selectionMode {
+	case EndpointSelectionRandom:
+		c.RLock()
+		eq := endpointsEqual(c.endpoints, neps)
+		c.RUnlock()
+
+		if eq {
+			return nil
+		}
+		// When items in the endpoint list changes, we choose a new pin
+		neps = shuffleEndpoints(c.rand, neps)
+	case EndpointSelectionPrioritizeLeader:
+		nle, err := c.getLeaderEndpoint(ctx, neps)
+		if err != nil {
+			return ErrNoLeaderEndpoint
+		}
+
+		for i, n := range neps {
+			if n.String() == nle {
+				npin = i
+				break
+			}
+		}
+	default:
+		return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+	}
+
+	c.Lock()
+	defer c.Unlock()
+	c.endpoints = neps
+	c.pinned = npin
+
+	return nil
 }
 
 func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
@@ -607,3 +644,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
 	}
 	return neps
 }
+
+func endpointsEqual(left, right []url.URL) bool {
+	if len(left) != len(right) {
+		return false
+	}
+
+	sLeft := make([]string, len(left))
+	sRight := make([]string, len(right))
+	for i, l := range left {
+		sLeft[i] = l.String()
+	}
+	for i, r := range right {
+		sRight[i] = r.String()
+	}
+
+	sort.Strings(sLeft)
+	sort.Strings(sRight)
+	for i := range sLeft {
+		if sLeft[i] != sRight[i] {
+			return false
+		}
+	}
+	return true
+}
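
Sync's fast path no longer depends on reflect.DeepEqual over pre-sorted string slices; the new endpointsEqual compares the two URL sets order-insensitively, so an unchanged cluster keeps the currently pinned endpoint. A standalone sketch of that comparison:

package main

import (
	"fmt"
	"net/url"
	"sort"
)

// equalSets mirrors endpointsEqual above: stringify both URL slices,
// sort, and compare element-wise, so mere ordering differences in the
// member list don't force a re-shuffle and re-pin.
func equalSets(left, right []url.URL) bool {
	if len(left) != len(right) {
		return false
	}
	ls, rs := make([]string, len(left)), make([]string, len(right))
	for i := range left {
		ls[i] = left[i].String()
	}
	for i := range right {
		rs[i] = right[i].String()
	}
	sort.Strings(ls)
	sort.Strings(rs)
	for i := range ls {
		if ls[i] != rs[i] {
			return false
		}
	}
	return true
}

func main() {
	a, _ := url.Parse("http://10.0.0.1:2379")
	b, _ := url.Parse("http://10.0.0.2:2379")
	fmt.Println(equalSets([]url.URL{*a, *b}, []url.URL{*b, *a})) // true
}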

6 vendor/github.com/coreos/etcd/client/cluster_error.go (generated, vendored)

@@ -21,7 +21,11 @@ type ClusterError struct {
 }
 
 func (ce *ClusterError) Error() string {
-	return ErrClusterUnavailable.Error()
+	s := ErrClusterUnavailable.Error()
+	for i, e := range ce.Errors {
+		s += fmt.Sprintf("; error #%d: %s\n", i, e)
+	}
+	return s
 }
 
 func (ce *ClusterError) Detail() string {

675 vendor/github.com/coreos/etcd/client/keys.generated.go (generated, vendored)

File diff suppressed because it is too large.

32 vendor/github.com/coreos/etcd/client/keys.go (generated, vendored)

@@ -191,6 +191,10 @@ type SetOptions struct {
 
 	// Dir specifies whether or not this Node should be created as a directory.
 	Dir bool
+
+	// NoValueOnSuccess specifies whether the response contains the current value of the Node.
+	// If set, the response will only contain the current value when the request fails.
+	NoValueOnSuccess bool
 }
 
 type GetOptions struct {
@@ -268,6 +272,10 @@ type Response struct {
 	// Index holds the cluster-level index at the time the Response was generated.
 	// This index is not tied to the Node(s) contained in this Response.
 	Index uint64 `json:"-"`
+
+	// ClusterID holds the cluster-level ID reported by the server. This
+	// should be different for different etcd clusters.
+	ClusterID string `json:"-"`
 }
 
 type Node struct {
@@ -335,6 +343,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
 		act.TTL = opts.TTL
 		act.Refresh = opts.Refresh
 		act.Dir = opts.Dir
+		act.NoValueOnSuccess = opts.NoValueOnSuccess
 	}
 
 	doCtx := ctx
@@ -523,15 +532,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
 }
 
 type setAction struct {
-	Prefix    string
-	Key       string
-	Value     string
-	PrevValue string
-	PrevIndex uint64
-	PrevExist PrevExistType
-	TTL       time.Duration
-	Refresh   bool
-	Dir       bool
+	Prefix           string
+	Key              string
+	Value            string
+	PrevValue        string
+	PrevIndex        uint64
+	PrevExist        PrevExistType
+	TTL              time.Duration
+	Refresh          bool
+	Dir              bool
+	NoValueOnSuccess bool
 }
 
 func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
@@ -565,6 +575,9 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
 	if a.Refresh {
 		form.Add("refresh", "true")
 	}
+	if a.NoValueOnSuccess {
+		params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+	}
 
 	u.RawQuery = params.Encode()
 	body := strings.NewReader(form.Encode())
@@ -656,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
 			return nil, err
 		}
 	}
+	res.ClusterID = header.Get("X-Etcd-Cluster-ID")
 	return &res, nil
}
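
NoValueOnSuccess lets v2 Set callers skip the value echo in successful responses, which matters when values are large. A hedged usage sketch against the vendored client API; the endpoint and key here are placeholders, and the empty-value behavior on success is what the option's doc comment implies:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"}, // placeholder endpoint
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// With NoValueOnSuccess set, a successful response omits the stored
	// value; per the new option's comment, it is only echoed back when
	// the request fails.
	resp, err := kapi.Set(context.Background(), "/foo", "some-large-value",
		&client.SetOptions{NoValueOnSuccess: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Value) // expected to be empty on success
}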

30 vendor/github.com/coreos/etcd/client/util.go (generated, vendored)

@@ -14,6 +14,20 @@
 
 package client
 
+import (
+	"regexp"
+)
+
+var (
+	roleNotFoundRegExp *regexp.Regexp
+	userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+	roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+	userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
 // IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
 func IsKeyNotFound(err error) bool {
 	if cErr, ok := err.(Error); ok {
@@ -21,3 +35,19 @@ func IsKeyNotFound(err error) bool {
 	}
 	return false
 }
+
+// IsRoleNotFound returns true if the error means role not found of v2 API.
+func IsRoleNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return roleNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
+
+// IsUserNotFound returns true if the error means user not found of v2 API.
+func IsUserNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return userNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}

4 vendor/github.com/coreos/etcd/clientv3/README.md (generated, vendored)

@@ -72,6 +72,10 @@ if err != nil {
 }
 ```
 
+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
+
 ## Examples
 
 More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).

5 vendor/github.com/coreos/etcd/clientv3/auth.go (generated, vendored)

@@ -43,6 +43,7 @@ type (
 	AuthRoleListResponse pb.AuthRoleListResponse
 
 	PermissionType authpb.Permission_Type
+	Permission     authpb.Permission
 )
 
 const (
@@ -115,12 +116,12 @@ func NewAuth(c *Client) Auth {
 }
 
 func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
-	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
 	return (*AuthEnableResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
-	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
 	return (*AuthDisableResponse)(resp), toErr(ctx, err)
 }

76 vendor/github.com/coreos/etcd/clientv3/balancer.go (generated, vendored)

@@ -21,8 +21,14 @@ import (
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||
// any active connection to endpoints at the time.
|
||||
// This error is returned only when opts.BlockingWait is true.
|
||||
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
|
||||
|
||||
// simpleBalancer does the bare minimum to expose multiple eps
|
||||
// to the grpc reconnection code path
|
||||
type simpleBalancer struct {
|
||||
@@ -42,6 +48,11 @@ type simpleBalancer struct {
|
||||
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
|
||||
upc chan struct{}
|
||||
|
||||
// grpc issues TLS cert checks using the string passed into dial so
|
||||
// that string must be the host. To recover the full scheme://host URL,
|
||||
// have a map from hosts to the original endpoint.
|
||||
host2ep map[string]string
|
||||
|
||||
// pinAddr is the currently pinned address; set to the empty string on
|
||||
// intialization and shutdown.
|
||||
pinAddr string
|
||||
@@ -62,11 +73,12 @@ func newSimpleBalancer(eps []string) *simpleBalancer {
|
||||
readyc: make(chan struct{}),
|
||||
upEps: make(map[string]struct{}),
|
||||
upc: make(chan struct{}),
|
||||
host2ep: getHost2ep(eps),
|
||||
}
|
||||
return sb
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) Start(target string) error { return nil }
|
||||
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
||||
|
||||
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
||||
b.mu.Lock()
|
||||
@@ -74,6 +86,49 @@ func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
||||
return b.upc
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) getEndpoint(host string) string {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.host2ep[host]
|
||||
}
|
||||
|
||||
func getHost2ep(eps []string) map[string]string {
|
||||
hm := make(map[string]string, len(eps))
|
||||
for i := range eps {
|
||||
_, host, _ := parseEndpoint(eps[i])
|
||||
hm[host] = eps[i]
|
||||
}
|
||||
return hm
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) updateAddrs(eps []string) {
|
||||
np := getHost2ep(eps)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
match := len(np) == len(b.host2ep)
|
||||
for k, v := range np {
|
||||
if b.host2ep[k] != v {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if match {
|
||||
// same endpoints, so no need to update address
|
||||
return
|
||||
}
|
||||
|
||||
b.host2ep = np
|
||||
|
||||
addrs := make([]grpc.Address, 0, len(eps))
|
||||
for i := range eps {
|
||||
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
|
||||
}
|
||||
b.addrs = addrs
|
||||
b.notifyCh <- addrs
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
@@ -113,6 +168,25 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||
|
||||
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||
var addr string
|
||||
|
||||
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||
// an address it has notified via Notify immediately instead of blocking.
|
||||
if !opts.BlockingWait {
|
||||
b.mu.RLock()
|
||||
closed := b.closed
|
||||
addr = b.pinAddr
|
||||
upEps := len(b.upEps)
|
||||
b.mu.RUnlock()
|
||||
if closed {
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
}
|
||||
|
||||
if upEps == 0 {
|
||||
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||
}
|
||||
return grpc.Address{Addr: addr}, func() {}, nil
|
||||
}
|
||||
|
||||
for {
|
||||
b.mu.RLock()
|
||||
ch := b.upc
|
||||
|
||||
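To make the host-to-endpoint bookkeeping concrete, here is a toy, self-contained re-implementation of the idea; the function name is hypothetical, and the real code uses parseEndpoint from client.go rather than net/url directly:

```go
package main

import (
	"fmt"
	"net/url"
)

// host2ep mirrors the balancer's bookkeeping: grpc dials by bare host, so the
// balancer needs a way to map a host back to the full scheme://host endpoint.
func host2ep(eps []string) map[string]string {
	hm := make(map[string]string, len(eps))
	for _, ep := range eps {
		host := ep
		if u, err := url.Parse(ep); err == nil && u.Host != "" {
			host = u.Host // strip the scheme, keep host:port
		}
		hm[host] = ep
	}
	return hm
}

func main() {
	m := host2ep([]string{"https://10.0.0.1:2379", "http://etcd-1:2379"})
	fmt.Println(m["10.0.0.1:2379"]) // https://10.0.0.1:2379
}
```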
198
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
@@ -18,17 +18,18 @@ import (
	"crypto/tls"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/metadata"
)
@@ -46,11 +47,12 @@ type Client struct {
	Auth
	Maintenance

	conn     *grpc.ClientConn
	cfg      Config
	creds    *credentials.TransportCredentials
	balancer *simpleBalancer
	retryWrapper retryRpcFunc
	conn             *grpc.ClientConn
	cfg              Config
	creds            *credentials.TransportCredentials
	balancer         *simpleBalancer
	retryWrapper     retryRpcFunc
	retryAuthWrapper retryRpcFunc

	ctx    context.Context
	cancel context.CancelFunc
@@ -59,6 +61,8 @@ type Client struct {
	Username string
	// Password is a password for authentication
	Password string
	// tokenCred is an instance of WithPerRPCCredentials()'s argument
	tokenCred *authTokenCredential
}

// New creates a new etcdv3 client from a given configuration.
@@ -87,6 +91,8 @@ func NewFromConfigFile(path string) (*Client, error) {
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
	c.cancel()
	c.Watcher.Close()
	c.Lease.Close()
	return toErr(c.ctx, c.conn.Close())
}

@@ -96,10 +102,54 @@ func (c *Client) Close() error {
func (c *Client) Ctx() context.Context { return c.ctx }

// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
func (c *Client) Endpoints() (eps []string) {
	// copy the slice; protect original endpoints from being changed
	eps = make([]string, len(c.cfg.Endpoints))
	copy(eps, c.cfg.Endpoints)
	return
}

// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
	c.cfg.Endpoints = eps
	c.balancer.updateAddrs(eps)
}

// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
	mresp, err := c.MemberList(ctx)
	if err != nil {
		return err
	}
	var eps []string
	for _, m := range mresp.Members {
		eps = append(eps, m.ClientURLs...)
	}
	c.SetEndpoints(eps...)
	return nil
}

func (c *Client) autoSync() {
	if c.cfg.AutoSyncInterval == time.Duration(0) {
		return
	}

	for {
		select {
		case <-c.ctx.Done():
			return
		case <-time.After(c.cfg.AutoSyncInterval):
			ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
			if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
				logger.Println("Auto sync endpoints failed:", err)
			}
		}
	}
}

type authTokenCredential struct {
	token string
	token   string
	tokenMu *sync.RWMutex
}

func (cred authTokenCredential) RequireTransportSecurity() bool {
@@ -107,24 +157,38 @@ func (cred authTokenCredential) RequireTransportSecurity() bool {
}

func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
	cred.tokenMu.RLock()
	defer cred.tokenMu.RUnlock()
	return map[string]string{
		"token": cred.token,
	}, nil
}

func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	proto = "tcp"
	host = endpoint
	creds = c.creds
	url, uerr := url.Parse(endpoint)
	if uerr != nil || !strings.Contains(endpoint, "://") {
		return
	}
	scheme = url.Scheme

	// strip scheme:// prefix since grpc dials by host
	host = url.Host
	switch url.Scheme {
	case "http", "https":
	case "unix":
		proto = "unix"
	default:
		proto, host = "", ""
	}
	return
}

func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
	creds = c.creds
	switch scheme {
	case "unix":
	case "http":
		creds = nil
	case "https":
@@ -135,7 +199,7 @@ func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *
		emptyCreds := credentials.NewTLS(tlsconfig)
		creds = &emptyCreds
	default:
		return "", "", nil
		creds = nil
	}
	return
}
@@ -147,17 +211,8 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
	}
	opts = append(opts, dopts...)

	// grpc issues TLS cert checks using the string passed into dial so
	// that string must be the host. To recover the full scheme://host URL,
	// have a map from hosts to the original endpoint.
	host2ep := make(map[string]string)
	for i := range c.cfg.Endpoints {
		_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
		host2ep[host] = c.cfg.Endpoints[i]
	}

	f := func(host string, t time.Duration) (net.Conn, error) {
		proto, host, _ := c.dialTarget(host2ep[host])
		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
		if proto == "" {
			return nil, fmt.Errorf("unknown scheme for %q", host)
		}
@@ -166,11 +221,15 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
			return nil, c.ctx.Err()
		default:
		}
		return net.DialTimeout(proto, host, t)
		dialer := &net.Dialer{Timeout: t}
		return dialer.DialContext(c.ctx, proto, host)
	}
	opts = append(opts, grpc.WithDialer(f))

	_, _, creds := c.dialTarget(endpoint)
	creds := c.creds
	if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
		creds = c.processCreds(scheme)
	}
	if creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(*creds))
	} else {
@@ -185,24 +244,56 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
	return c.dial(endpoint)
}

func (c *Client) getToken(ctx context.Context) error {
	var err error // return last error in case of failure
	var auth *authenticator

	for i := 0; i < len(c.cfg.Endpoints); i++ {
		endpoint := c.cfg.Endpoints[i]
		host := getHost(endpoint)
		// use dial options without dopts to avoid reusing the client balancer
		auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
		if err != nil {
			continue
		}
		defer auth.close()

		var resp *AuthenticateResponse
		resp, err = auth.authenticate(ctx, c.Username, c.Password)
		if err != nil {
			continue
		}

		c.tokenCred.tokenMu.Lock()
		c.tokenCred.token = resp.Token
		c.tokenCred.tokenMu.Unlock()

		return nil
	}

	return err
}

func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
	opts := c.dialSetupOpts(endpoint, dopts...)
	host := getHost(endpoint)
	if c.Username != "" && c.Password != "" {
		// use dial options without dopts to avoid reusing the client balancer
		auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
		if err != nil {
			return nil, err
		c.tokenCred = &authTokenCredential{
			tokenMu: &sync.RWMutex{},
		}
		defer auth.close()

		resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
		err := c.getToken(context.TODO())
		if err != nil {
			return nil, err
		}
		opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))

		opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
	}

	// add metrics options
	opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
	opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))

	conn, err := grpc.Dial(host, opts...)
	if err != nil {
		return nil, err
@@ -248,6 +339,7 @@ func newClient(cfg *Config) (*Client, error) {
	}
	client.conn = conn
	client.retryWrapper = client.newRetryWrapper()
	client.retryAuthWrapper = client.newAuthRetryWrapper()

	// wait for a connection
	if cfg.DialTimeout > 0 {
@@ -272,13 +364,8 @@ func newClient(cfg *Config) (*Client, error) {
	client.Watcher = NewWatcher(client)
	client.Auth = NewAuth(client)
	client.Maintenance = NewMaintenance(client)
	if cfg.Logger != nil {
		logger.Set(cfg.Logger)
	} else {
		// disable client side grpc by default
		logger.Set(log.New(ioutil.Discard, "", 0))
	}

	go client.autoSync()
	return client, nil
}

@@ -294,17 +381,14 @@ func isHaltErr(ctx context.Context, err error) bool {
	if err == nil {
		return false
	}
	eErr := rpctypes.Error(err)
	if _, ok := eErr.(rpctypes.EtcdError); ok {
		return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
	}
	// treat etcdserver errors not recognized by the client as halting
	return isConnClosing(err) || strings.Contains(err.Error(), "etcdserver:")
}

// isConnClosing returns true if the error matches a grpc client closing error
func isConnClosing(err error) bool {
	return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
	code := grpc.Code(err)
	// Unavailable codes mean the system will be right back.
	// (e.g., can't connect, lost leader)
	// Treat Internal codes as if something failed, leaving the
	// system in an inconsistent state, but retrying could make progress.
	// (e.g., failed in middle of send, corrupted frame)
	// TODO: are permanent Internal errors possible from grpc?
	return code != codes.Unavailable && code != codes.Internal
}

func toErr(ctx context.Context, err error) error {
@@ -312,12 +396,20 @@ func toErr(ctx context.Context, err error) error {
		return nil
	}
	err = rpctypes.Error(err)
	switch {
	case ctx.Err() != nil && strings.Contains(err.Error(), "context"):
		err = ctx.Err()
	case strings.Contains(err.Error(), ErrNoAvailableEndpoints.Error()):
	if _, ok := err.(rpctypes.EtcdError); ok {
		return err
	}
	code := grpc.Code(err)
	switch code {
	case codes.DeadlineExceeded:
		fallthrough
	case codes.Canceled:
		if ctx.Err() != nil {
			err = ctx.Err()
		}
	case codes.Unavailable:
		err = ErrNoAvailableEndpoints
	case strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()):
	case codes.FailedPrecondition:
		err = grpc.ErrClientConnClosing
	}
	return err
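A sketch of the new endpoint-management surface (the copying Endpoints getter, SetEndpoints, and Sync); the endpoint address is illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Endpoints() now returns a copy, so callers cannot mutate client state
	log.Println("configured endpoints:", cli.Endpoints())

	// Sync asks the cluster for its member list and rewires the balancer
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cli.Sync(ctx); err != nil {
		log.Println("sync failed:", err)
	}
	log.Println("synced endpoints:", cli.Endpoints())
}
```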
2
vendor/github.com/coreos/etcd/clientv3/cluster.go
generated
vendored
@@ -78,7 +78,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
	// it is safe to retry on update.
	for {
		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
		resp, err := c.remote.MemberUpdate(ctx, r)
		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
		if err == nil {
			return (*MemberUpdateResponse)(resp), nil
		}
2
vendor/github.com/coreos/etcd/clientv3/compare.go
generated
vendored
@@ -36,6 +36,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
	switch result {
	case "=":
		r = pb.Compare_EQUAL
	case "!=":
		r = pb.Compare_NOT_EQUAL
	case ">":
		r = pb.Compare_GREATER
	case "<":
13
vendor/github.com/coreos/etcd/clientv3/config.go
generated
vendored
@@ -28,15 +28,16 @@ type Config struct {
	// Endpoints is a list of URLs
	Endpoints []string

	// AutoSyncInterval is the interval to update endpoints with its latest members.
	// 0 disables auto-sync. By default auto-sync is disabled.
	AutoSyncInterval time.Duration

	// DialTimeout is the timeout for failing to establish a connection.
	DialTimeout time.Duration

	// TLS holds the client secure credentials, if any.
	TLS *tls.Config

	// Logger is the logger used by client library.
	Logger Logger

	// Username is a username for authentication
	Username string

@@ -46,6 +47,7 @@ type Config struct {

type yamlConfig struct {
	Endpoints             []string      `json:"endpoints"`
	AutoSyncInterval      time.Duration `json:"auto-sync-interval"`
	DialTimeout           time.Duration `json:"dial-timeout"`
	InsecureTransport     bool          `json:"insecure-transport"`
	InsecureSkipTLSVerify bool          `json:"insecure-skip-tls-verify"`
@@ -68,8 +70,9 @@ func configFromFile(fpath string) (*Config, error) {
	}

	cfg := &Config{
		Endpoints:   yc.Endpoints,
		DialTimeout: yc.DialTimeout,
		Endpoints:        yc.Endpoints,
		AutoSyncInterval: yc.AutoSyncInterval,
		DialTimeout:      yc.DialTimeout,
	}

	if yc.InsecureTransport {
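A sketch of configuring the new auto-sync knob directly in Go; the endpoint names and interval values are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:        []string{"https://etcd-0:2379", "https://etcd-1:2379"},
		AutoSyncInterval: 5 * time.Minute, // 0 (the default) disables auto-sync
		DialTimeout:      5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```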
2
vendor/github.com/coreos/etcd/clientv3/doc.go
generated
vendored
@@ -44,7 +44,7 @@
// etcd client returns 2 types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
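The package comment above points at an error-handling example; a hedged sketch of that pattern, assuming a local endpoint:

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	_, err = cli.Put(context.Background(), "", "value") // empty key is rejected
	switch err {
	case nil:
	case context.Canceled, context.DeadlineExceeded:
		log.Println("context error:", err)
	case rpctypes.ErrEmptyKey:
		log.Println("server-side gRPC error:", err)
	default:
		log.Println("bad cluster endpoints or connection lost:", err)
	}
}
```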
23
vendor/github.com/coreos/etcd/clientv3/kv.go
generated
vendored
@@ -85,6 +85,10 @@ func NewKV(c *Client) KV {
	return &kv{remote: RetryKVClient(c)}
}

func NewKVFromKVClient(remote pb.KVClient) KV {
	return &kv{remote: remote}
}

func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
	r, err := kv.Do(ctx, OpPut(key, val, opts...))
	return r.put, toErr(ctx, err)
@@ -101,7 +105,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
}

func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
	if err != nil {
		return nil, toErr(ctx, err)
	}
@@ -121,6 +125,7 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
		if err == nil {
			return resp, nil
		}

		if isHaltErr(ctx, err) {
			return resp, toErr(ctx, err)
		}
@@ -137,21 +142,7 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
	// TODO: handle other ops
	case tRange:
		var resp *pb.RangeResponse
		r := &pb.RangeRequest{
			Key:          op.key,
			RangeEnd:     op.end,
			Limit:        op.limit,
			Revision:     op.rev,
			Serializable: op.serializable,
			KeysOnly:     op.keysOnly,
			CountOnly:    op.countOnly,
		}
		if op.sort != nil {
			r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
			r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
		}

		resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
		if err == nil {
			return OpResponse{get: (*GetResponse)(resp)}, nil
		}
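The new NewKVFromKVClient constructor lets callers wrap a pre-built gRPC KV client, for example over a shared connection or a test double. A minimal sketch, assuming a reachable plaintext endpoint:

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

func main() {
	// any *grpc.ClientConn works, which is the point of NewKVFromKVClient:
	// reuse an existing connection (or pass a mock pb.KVClient in tests)
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	kv := clientv3.NewKVFromKVClient(pb.NewKVClient(conn))
	if _, err := kv.Put(context.Background(), "foo", "bar"); err != nil {
		log.Println("put failed:", err)
	}
}
```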
83
vendor/github.com/coreos/etcd/clientv3/lease.go
generated
vendored
@@ -44,6 +44,21 @@ type LeaseKeepAliveResponse struct {
	TTL int64
}

// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	ID LeaseID `json:"id"`

	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
	TTL int64 `json:"ttl"`

	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`

	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}

const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
@@ -54,6 +69,21 @@ const (
	NoLease LeaseID = 0
)

// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
	Reason error
}

func (e ErrKeepAliveHalted) Error() string {
	s := "etcdclient: leases keep alive halted"
	if e.Reason != nil {
		s += ": " + e.Reason.Error()
	}
	return s
}

type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
@@ -61,6 +91,9 @@ type Lease interface {
	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// KeepAlive keeps the given lease alive forever.
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

@@ -76,8 +109,9 @@ type Lease interface {
type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed when recvKeepAliveLoop stops
	donec chan struct{}
	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

@@ -141,7 +175,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
			return gresp, nil
		}
		if isHaltErr(cctx, err) {
			return nil, toErr(ctx, err)
			return nil, toErr(cctx, err)
		}
	}
}
@@ -164,10 +198,43 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
	}
}

func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	done := cancelWhenStop(cancel, l.stopCtx.Done())
	defer close(done)

	for {
		r := toLeaseTimeToLiveRequest(id, opts...)
		resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
		if err == nil {
			gresp := &LeaseTimeToLiveResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				GrantedTTL:     resp.GrantedTTL,
				Keys:           resp.Keys,
			}
			return gresp, nil
		}
		if isHaltErr(cctx, err) {
			return nil, toErr(cctx, err)
		}
	}
}

func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		err := l.loopErr
		l.mu.Unlock()
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
@@ -275,10 +342,11 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
	return karesp, nil
}

func (l *lessor) recvKeepAliveLoop() {
func (l *lessor) recvKeepAliveLoop() (gerr error) {
	defer func() {
		l.mu.Lock()
		close(l.donec)
		l.loopErr = gerr
		for _, ka := range l.keepAlives {
			ka.Close()
		}
@@ -291,13 +359,14 @@ func (l *lessor) recvKeepAliveLoop() {
		resp, err := stream.Recv()
		if err != nil {
			if isHaltErr(l.stopCtx, err) {
				return
				return err
			}
			stream, serr = l.resetRecv()
			continue
		}
		l.recvKeepAlive(resp)
	}
	return serr
}

// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
@@ -347,7 +416,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
	}

	// send update to all channels
	nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
	for _, ch := range ka.chs {
		select {
@@ -393,7 +462,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
			return
		}

		tosend := make([]LeaseID, 0)
		var tosend []LeaseID

		now := time.Now()
		l.mu.Lock()
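A sketch of the new TimeToLive API together with WithAttachedKeys; the key name, lease TTL, and endpoint are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	gresp, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(ctx, "job/1", "pending", clientv3.WithLease(gresp.ID)); err != nil {
		log.Fatal(err)
	}

	// WithAttachedKeys asks the server to also return the keys bound to the lease
	ttl, err := cli.TimeToLive(ctx, gresp.ID, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("remaining=%ds granted=%ds keys=%q\n", ttl.TTL, ttl.GrantedTTL, ttl.Keys)
}
```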
40
vendor/github.com/coreos/etcd/clientv3/logger.go
generated
vendored
@@ -15,13 +15,15 @@
package clientv3

import (
	"io/ioutil"
	"log"
	"os"
	"sync"

	"google.golang.org/grpc/grpclog"
)

// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger

var (
@@ -34,20 +36,36 @@ type settableLogger struct {
}

func init() {
	// use go's standard logger by default like grpc
	// disable client side logs by default
	logger.mu.Lock()
	logger.l = log.New(os.Stderr, "", log.LstdFlags)
	logger.l = log.New(ioutil.Discard, "", 0)

	// logger has to override the grpclog at initialization so that
	// any changes to the grpclog go through logger with locking
	// instead of through SetLogger
	//
	// now updates only happen through settableLogger.set
	grpclog.SetLogger(&logger)
	logger.mu.Unlock()
}

func (s *settableLogger) Set(l Logger) {
// SetLogger sets client-side Logger. By default, logs are disabled.
func SetLogger(l Logger) {
	logger.set(l)
}

// GetLogger returns the current logger.
func GetLogger() Logger {
	return logger.get()
}

func (s *settableLogger) set(l Logger) {
	s.mu.Lock()
	logger.l = l
	s.mu.Unlock()
}

func (s *settableLogger) Get() Logger {
func (s *settableLogger) get() Logger {
	s.mu.RLock()
	l := logger.l
	s.mu.RUnlock()
@@ -56,9 +74,9 @@ func (s *settableLogger) Get() Logger {

// implement the grpclog.Logger interface

func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
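Since logs are now discarded by default, callers opt back in through the exported SetLogger; `*log.Logger` satisfies the grpclog.Logger interface. A minimal sketch (the prefix string is illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// re-enable client-side logging, which init() above disables
	clientv3.SetLogger(log.New(os.Stderr, "etcd-client ", log.LstdFlags))
}
```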
145
vendor/github.com/coreos/etcd/clientv3/op.go
generated
vendored
@@ -14,9 +14,7 @@

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

type opType int

@@ -43,6 +41,10 @@ type Op struct {
	serializable bool
	keysOnly     bool
	countOnly    bool
	minModRev    int64
	maxModRev    int64
	minCreateRev int64
	maxCreateRev int64

	// for range, watch
	rev int64
@@ -52,29 +54,45 @@ type Op struct {

	// progressNotify is for progress updates.
	progressNotify bool
	// createdNotify is for created event
	createdNotify bool
	// filters for watchers
	filterPut    bool
	filterDelete bool

	// for put
	val     []byte
	leaseID LeaseID
}

func (op Op) toRangeRequest() *pb.RangeRequest {
	if op.t != tRange {
		panic("op.t != tRange")
	}
	r := &pb.RangeRequest{
		Key:               op.key,
		RangeEnd:          op.end,
		Limit:             op.limit,
		Revision:          op.rev,
		Serializable:      op.serializable,
		KeysOnly:          op.keysOnly,
		CountOnly:         op.countOnly,
		MinModRevision:    op.minModRev,
		MaxModRevision:    op.maxModRev,
		MinCreateRevision: op.minCreateRev,
		MaxCreateRevision: op.maxCreateRev,
	}
	if op.sort != nil {
		r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
		r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
	}
	return r
}

func (op Op) toRequestOp() *pb.RequestOp {
	switch op.t {
	case tRange:
		r := &pb.RangeRequest{
			Key:          op.key,
			RangeEnd:     op.end,
			Limit:        op.limit,
			Revision:     op.rev,
			Serializable: op.serializable,
			KeysOnly:     op.keysOnly,
			CountOnly:    op.countOnly,
		}
		if op.sort != nil {
			r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
			r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
		}
		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
	case tPut:
		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
		return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
@@ -112,6 +130,14 @@ func OpDelete(key string, opts ...OpOption) Op {
		panic("unexpected serializable in delete")
	case ret.countOnly:
		panic("unexpected countOnly in delete")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in delete")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in delete")
	case ret.filterDelete, ret.filterPut:
		panic("unexpected filter in delete")
	case ret.createdNotify:
		panic("unexpected createdNotify in delete")
	}
	return ret
}
@@ -131,7 +157,15 @@ func OpPut(key, val string, opts ...OpOption) Op {
	case ret.serializable:
		panic("unexpected serializable in put")
	case ret.countOnly:
		panic("unexpected countOnly in delete")
		panic("unexpected countOnly in put")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in put")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in put")
	case ret.filterDelete, ret.filterPut:
		panic("unexpected filter in put")
	case ret.createdNotify:
		panic("unexpected createdNotify in put")
	}
	return ret
}
@@ -149,7 +183,11 @@ func opWatch(key string, opts ...OpOption) Op {
	case ret.serializable:
		panic("unexpected serializable in watch")
	case ret.countOnly:
		panic("unexpected countOnly in delete")
		panic("unexpected countOnly in watch")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in watch")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in watch")
	}
	return ret
}
@@ -181,6 +219,14 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
func WithSort(target SortTarget, order SortOrder) OpOption {
	return func(op *Op) {
		if target == SortByKey && order == SortAscend {
			// If order != SortNone, server fetches the entire key-space,
			// and then applies the sort and limit, if provided.
			// Since current mvcc.Range implementation returns results
			// sorted by keys in lexicographically ascending order,
			// client should ignore SortOrder if the target is SortByKey.
			order = SortNone
		}
		op.sort = &SortOption{target, order}
	}
}
@@ -245,6 +291,18 @@ func WithCountOnly() OpOption {
	return func(op *Op) { op.countOnly = true }
}

// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }

// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }

// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }

// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }

// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }

@@ -268,7 +326,8 @@ func withTop(target SortTarget, order SortOrder) []OpOption {
	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}

// WithProgressNotify makes watch server send periodic progress updates.
// WithProgressNotify makes watch server send periodic progress updates
// every 10 minutes when there are no incoming events.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
	return func(op *Op) {
@@ -276,6 +335,23 @@ func WithProgressNotify() OpOption {
	}
}

// WithCreatedNotify makes watch server send the created event.
func WithCreatedNotify() OpOption {
	return func(op *Op) {
		op.createdNotify = true
	}
}

// WithFilterPut discards PUT events from the watcher.
func WithFilterPut() OpOption {
	return func(op *Op) { op.filterPut = true }
}

// WithFilterDelete discards DELETE events from the watcher.
func WithFilterDelete() OpOption {
	return func(op *Op) { op.filterDelete = true }
}

// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
// nothing will be returned.
func WithPrevKV() OpOption {
@@ -283,3 +359,32 @@ func WithPrevKV() OpOption {
		op.prevKV = true
	}
}

// LeaseOp represents an Operation that lease can execute.
type LeaseOp struct {
	id LeaseID

	// for TimeToLive
	attachedKeys bool
}

// LeaseOption configures lease operations.
type LeaseOption func(*LeaseOp)

func (op *LeaseOp) applyOpts(opts []LeaseOption) {
	for _, opt := range opts {
		opt(op)
	}
}

// WithAttachedKeys requests lease timetolive API to return
// attached keys of given lease ID.
func WithAttachedKeys() LeaseOption {
	return func(op *LeaseOp) { op.attachedKeys = true }
}

func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
	ret := &LeaseOp{id: id}
	ret.applyOpts(opts)
	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
}
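A sketch of the new revision-filter options on a Get; the key prefix and revision numbers are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// fetch keys under job/ created at or after revision 100 and
	// not modified after revision 200
	resp, err := cli.Get(context.Background(), "job/",
		clientv3.WithPrefix(),
		clientv3.WithMinCreateRev(100),
		clientv3.WithMaxModRev(200),
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```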
120
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
@@ -15,68 +15,117 @@
package clientv3

import (
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc)
type retryRpcFunc func(context.Context, rpcFunc) error

func (c *Client) newRetryWrapper() retryRpcFunc {
	return func(rpcCtx context.Context, f rpcFunc) {
	return func(rpcCtx context.Context, f rpcFunc) error {
		for {
			err := f(rpcCtx)
			// ignore grpc conn closing on fail-fast calls; they are transient errors
			if err == nil || !isConnClosing(err) {
				return
			if err == nil {
				return nil
			}

			eErr := rpctypes.Error(err)
			// always stop retry on etcd errors
			if _, ok := eErr.(rpctypes.EtcdError); ok {
				return err
			}

			// only retry if unavailable
			if grpc.Code(err) != codes.Unavailable {
				return err
			}

			select {
			case <-c.balancer.ConnectNotify():
			case <-rpcCtx.Done():
				return rpcCtx.Err()
			case <-c.ctx.Done():
				return
				return c.ctx.Err()
			}
		}
	}
}

type retryKVClient struct {
	pb.KVClient
	retryf retryRpcFunc
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
	return func(rpcCtx context.Context, f rpcFunc) error {
		for {
			err := f(rpcCtx)
			if err == nil {
				return nil
			}

			// always stop retry on etcd errors other than invalid auth token
			if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
				gterr := c.getToken(rpcCtx)
				if gterr != nil {
					return err // return the original error for simplicity
				}
				continue
			}

			return err
		}
	}
}

// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
	return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
	retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
	return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
}

func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
	rkv.retryf(ctx, func(rctx context.Context) error {
type retryKVClient struct {
	*retryWriteKVClient
}

func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
		return err
	})
	return resp, err
}

type retryWriteKVClient struct {
	pb.KVClient
	retryf retryRpcFunc
}

func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Put(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
	rkv.retryf(ctx, func(rctx context.Context) error {
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
	rkv.retryf(ctx, func(rctx context.Context) error {
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Txn(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
	rkv.retryf(ctx, func(rctx context.Context) error {
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Compact(rctx, in, opts...)
		return err
	})
@@ -90,11 +139,12 @@ type retryLeaseClient struct {

// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient {
	return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
	retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
	return &retryLeaseClient{retry, c.retryAuthWrapper}
}

func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
	rlc.retryf(ctx, func(rctx context.Context) error {
	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
		return err
	})
@@ -103,7 +153,7 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
}

func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
	rlc.retryf(ctx, func(rctx context.Context) error {
	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
		return err
	})
@@ -121,7 +171,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
}

func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
	rcc.retryf(ctx, func(rctx context.Context) error {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
		return err
	})
@@ -129,7 +179,7 @@ func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRe
}

func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
	rcc.retryf(ctx, func(rctx context.Context) error {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
		return err
	})
@@ -137,7 +187,7 @@ func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRe
}

func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
	rcc.retryf(ctx, func(rctx context.Context) error {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
		return err
	})
@@ -155,7 +205,7 @@ func RetryAuthClient(c *Client) pb.AuthClient {
}

func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
		return err
	})
@@ -163,7 +213,7 @@ func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableReq
}

func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
		return err
	})
@@ -171,7 +221,7 @@ func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableR
}

func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
		return err
	})
@@ -179,7 +229,7 @@ func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddReque
}

func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
		return err
	})
@@ -187,7 +237,7 @@ func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDelet
}

func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
		return err
	})
@@ -195,7 +245,7 @@ func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthU
}

func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
		return err
	})
@@ -203,7 +253,7 @@ func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGr
}

func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
		return err
	})
@@ -211,7 +261,7 @@ func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserR
}

func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
		return err
	})
@@ -219,7 +269,7 @@ func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddReque
}

func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
		return err
	})
@@ -227,7 +277,7 @@ func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDelet
}

func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
		return err
	})
@@ -235,7 +285,7 @@ func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.Auth
}

func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
	rac.retryf(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
		return err
	})
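The wrapped clients above all follow the same shape. A condensed, self-contained sketch of the pattern with hypothetical names; the real wrapper keys retries off the balancer's ConnectNotify channel and gRPC's Unavailable code rather than a sentinel error:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type rpcFunc func(ctx context.Context) error

var errUnavailable = errors.New("unavailable")

// withRetry re-invokes f until it succeeds, returns a non-retryable error,
// or the context is done -- the same loop structure as newRetryWrapper.
func withRetry(ctx context.Context, reconnected <-chan struct{}, f rpcFunc) error {
	for {
		err := f(ctx)
		if err == nil || err != errUnavailable {
			return err
		}
		select {
		case <-reconnected: // connection re-established; retry
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ch := make(chan struct{}, 1)
	ch <- struct{}{}
	calls := 0
	err := withRetry(context.Background(), ch, func(context.Context) error {
		calls++
		if calls == 1 {
			return errUnavailable
		}
		return nil
	})
	fmt.Println(calls, err) // 2 <nil>
}
```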
8
vendor/github.com/coreos/etcd/clientv3/txn.go
generated
vendored
@@ -19,6 +19,7 @@ import (

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// Txn is the interface that wraps mini-transactions.
@@ -152,7 +153,12 @@ func (txn *txn) Commit() (*TxnResponse, error) {

func (txn *txn) commit() (*TxnResponse, error) {
	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
	resp, err := txn.kv.remote.Txn(txn.ctx, r)

	var opts []grpc.CallOption
	if !txn.isWrite {
		opts = []grpc.CallOption{grpc.FailFast(false)}
	}
	resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
	if err != nil {
		return nil, err
	}
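A sketch of a read-only transaction, which now goes out with `grpc.FailFast(false)` and so survives brief outages; it also exercises the "!=" comparison added in compare.go above. Key names are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// no puts/deletes in either branch, so isWrite stays false and
	// commit() issues the txn with FailFast disabled
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("config/mode"), "!=", "maintenance")).
		Then(clientv3.OpGet("config/mode")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("succeeded:", resp.Succeeded)
}
```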
83
vendor/github.com/coreos/etcd/clientv3/watch.go
generated
vendored
83
vendor/github.com/coreos/etcd/clientv3/watch.go
generated
vendored
@@ -61,8 +61,8 @@ type WatchResponse struct {
|
||||
// the channel sends a final response that has Canceled set to true with a non-nil Err().
|
||||
Canceled bool
|
||||
|
||||
// created is used to indicate the creation of the watcher.
|
||||
created bool
|
||||
// Created is used to indicate the creation of the watcher.
|
||||
Created bool
|
||||
|
||||
closeErr error
|
||||
}
|
||||
@@ -92,7 +92,7 @@ func (wr *WatchResponse) Err() error {
|
||||
|
||||
// IsProgressNotify returns true if the WatchResponse is progress notification.
|
||||
func (wr *WatchResponse) IsProgressNotify() bool {
|
||||
return len(wr.Events) == 0 && !wr.Canceled && !wr.created && wr.CompactRevision == 0 && wr.Header.Revision != 0
|
||||
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
|
||||
}
|
||||
|
||||
// watcher implements the Watcher interface
|
||||
@@ -101,6 +101,7 @@ type watcher struct {
|
||||
|
||||
// mu protects the grpc streams map
|
||||
mu sync.RWMutex
|
||||
|
||||
// streams holds all the active grpc streams keyed by ctx value.
|
||||
streams map[string]*watchGrpcStream
|
||||
}
|
||||
@@ -131,6 +132,8 @@ type watchGrpcStream struct {
|
||||
errc chan error
|
||||
// closingc gets the watcherStream of closing watchers
|
||||
closingc chan *watcherStream
|
||||
// wg is Done when all substream goroutines have exited
|
||||
wg sync.WaitGroup
|
||||
|
||||
// resumec closes to signal that all substreams should begin resuming
|
||||
resumec chan struct{}
|
||||
@@ -144,8 +147,12 @@ type watchRequest struct {
|
||||
key string
|
||||
end string
|
||||
rev int64
|
||||
// progressNotify is for progress updates.
|
||||
// send created notification event if this field is true
|
||||
createdNotify bool
|
||||
// progressNotify is for progress updates
|
||||
progressNotify bool
|
||||
// filters is the list of events to filter out
|
||||
filters []pb.WatchCreateRequest_FilterType
|
||||
// get the previous key-value pair before the event happens
|
||||
prevKV bool
|
||||
// retc receives a chan WatchResponse once the watcher is established
|
||||
@@ -173,8 +180,12 @@ type watcherStream struct {
|
||||
}
|
||||
|
||||
func NewWatcher(c *Client) Watcher {
|
||||
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
|
||||
}
|
||||
|
||||
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
|
||||
return &watcher{
|
||||
remote: pb.NewWatchClient(c.conn),
|
||||
remote: wc,
|
||||
streams: make(map[string]*watchGrpcStream),
|
||||
}
|
||||
}
|
||||
@@ -215,12 +226,22 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
|
||||
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
|
||||
ow := opWatch(key, opts...)
|
||||
|
||||
var filters []pb.WatchCreateRequest_FilterType
|
||||
if ow.filterPut {
|
||||
filters = append(filters, pb.WatchCreateRequest_NOPUT)
|
||||
}
|
||||
if ow.filterDelete {
|
||||
filters = append(filters, pb.WatchCreateRequest_NODELETE)
|
||||
}
|
||||
|
||||
wr := &watchRequest{
|
||||
ctx: ctx,
|
||||
createdNotify: ow.createdNotify,
|
||||
key: string(ow.key),
|
||||
end: string(ow.end),
|
||||
rev: ow.rev,
|
||||
progressNotify: ow.progressNotify,
|
||||
filters: filters,
|
||||
prevKV: ow.prevKV,
|
||||
retc: make(chan chan WatchResponse, 1),
|
||||
}

@@ -374,18 +395,20 @@ func (w *watchGrpcStream) run() {
for _, ws := range w.substreams {
if _, ok := closing[ws]; !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
for _, ws := range w.resuming {
if _, ok := closing[ws]; ws != nil && !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
w.joinSubstreams()
for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- {
for range closing {
w.closeSubstream(<-w.closingc)
}

w.wg.Wait()
w.owner.closeStream(w)
}()

@@ -410,6 +433,7 @@ func (w *watchGrpcStream) run() {
}

ws.donec = make(chan struct{})
w.wg.Add(1)
go w.serveSubstream(ws, w.resumec)

// queue up for watcher creation/resume
@@ -458,7 +482,7 @@ func (w *watchGrpcStream) run() {
}
// watch client failed to recv; spawn another if possible
case err := <-w.errc:
if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
@@ -508,7 +532,7 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
Header: *pbresp.Header,
Events: events,
CompactRevision: pbresp.CompactRevision,
created: pbresp.Created,
Created: pbresp.Created,
Canceled: pbresp.Canceled,
}
select {
@@ -555,6 +579,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
if !resuming {
w.closingc <- ws
}
w.wg.Done()
}()

emptyWr := &WatchResponse{}
@@ -562,14 +587,6 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
curWr := emptyWr
outc := ws.outc

if len(ws.buf) > 0 && ws.buf[0].created {
select {
case ws.initReq.retc <- ws.outc:
default:
}
ws.buf = ws.buf[1:]
}

if len(ws.buf) > 0 {
curWr = ws.buf[0]
} else {
@@ -587,13 +604,35 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
// shutdown from closeSubstream
return
}
// TODO pause channel if buffer gets too large
ws.buf = append(ws.buf, wr)

if wr.Created {
if ws.initReq.retc != nil {
ws.initReq.retc <- ws.outc
// to prevent next write from taking the slot in buffered channel
// and posting duplicate create events
ws.initReq.retc = nil

// send first creation event only if requested
if ws.initReq.createdNotify {
ws.outc <- *wr
}
}
}

nextRev = wr.Header.Revision
if len(wr.Events) > 0 {
nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
}
ws.initReq.rev = nextRev

// created event is already sent above,
// watcher should not post duplicate events
if wr.Created {
continue
}

// TODO pause channel if buffer gets too large
ws.buf = append(ws.buf, wr)
case <-w.ctx.Done():
return
case <-ws.initReq.ctx.Done():
@@ -639,6 +678,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
continue
}
ws.donec = make(chan struct{})
w.wg.Add(1)
go w.serveSubstream(ws, w.resumec)
}

@@ -659,6 +699,10 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
go func(ws *watcherStream) {
defer wg.Done()
if ws.closing {
if ws.initReq.ctx.Err() != nil && ws.outc != nil {
close(ws.outc)
ws.outc = nil
}
return
}
select {
@@ -719,6 +763,7 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
Key: []byte(wr.key),
RangeEnd: []byte(wr.end),
ProgressNotify: wr.progressNotify,
Filters: wr.filters,
PrevKv: wr.prevKV,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}

2
vendor/github.com/coreos/etcd/compactor/compactor.go
generated
vendored
@@ -26,7 +26,7 @@ import (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor")
)

const (

16
vendor/github.com/coreos/etcd/discovery/discovery.go
generated
vendored
@@ -52,7 +52,8 @@ var (

var (
// Number of retries discovery will attempt before giving up and erroring out.
nRetries = uint(math.MaxUint32)
nRetries = uint(math.MaxUint32)
maxExpoentialRetries = uint(8)
)

// JoinCluster will connect to the discovery service at the given url, and
@@ -243,7 +244,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
}
return nil, 0, 0, err
}
nodes := make([]*client.Node, 0)
var nodes []*client.Node
// append non-config keys to nodes
for _, n := range resp.Node.Nodes {
if !(path.Base(n.Key) == path.Base(configKey)) {
@@ -268,9 +269,14 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {

func (d *discovery) logAndBackoffForRetry(step string) {
d.retries++
retryTime := time.Second * (0x1 << d.retries)
plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTime)
d.clock.Sleep(retryTime)
// logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward.
retries := d.retries
if retries > maxExpoentialRetries {
retries = maxExpoentialRetries
}
retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
d.clock.Sleep(retryTimeInSecond)
}
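
A standalone sketch of the capped backoff this hunk introduces; the names are illustrative, not the vendored ones:

package main

import (
	"fmt"
	"time"
)

const maxExponentialRetries = 8

func backoff(retries uint) time.Duration {
	if retries > maxExponentialRetries {
		retries = maxExponentialRetries // constant backoff after 8 doublings
	}
	return time.Duration(1<<retries) * time.Second
}

func main() {
	for r := uint(1); r <= 10; r++ {
		fmt.Printf("retry %2d -> sleep %s\n", r, backoff(r))
	}
	// retries 9 and 10 both print 4m16s (256s), matching the capped behavior.
}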

func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {

33
vendor/github.com/coreos/etcd/discovery/srv.go
generated
vendored
@@ -17,6 +17,7 @@ package discovery
import (
"fmt"
"net"
"net/url"
"strings"

"github.com/coreos/etcd/pkg/types"
@@ -33,9 +34,8 @@ var (
// Also doesn't do any lookups for the token (though it could)
// Also sees each entry as a separate instance.
func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) {
stringParts := make([]string, 0)
tempName := int(0)
tcpAPUrls := make([]string, 0)
tcp2ap := make(map[string]url.URL)

// First, resolve the apurls
for _, url := range apurls {
@@ -44,10 +44,11 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host)
return "", "", err
}
tcpAPUrls = append(tcpAPUrls, tcpAddr.String())
tcp2ap[tcpAddr.String()] = url
}

updateNodeMap := func(service, prefix string) error {
stringParts := []string{}
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
return err
@@ -61,35 +62,37 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
continue
}
n := ""
for _, url := range tcpAPUrls {
if url == tcpAddr.String() {
n = name
}
url, ok := tcp2ap[tcpAddr.String()]
if ok {
n = name
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName += 1
tempName++
}
// SRV records have a trailing dot but URL shouldn't.
shortHost := strings.TrimSuffix(srv.Target, ".")
urlHost := net.JoinHostPort(shortHost, port)
stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, urlHost))
plog.Noticef("got bootstrap from DNS for %s at %s%s", service, prefix, urlHost)
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost)
if ok && url.Scheme != scheme {
plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
}
}
return nil
}

failCount := 0
err := updateNodeMap("etcd-server-ssl", "https://")
err := updateNodeMap("etcd-server-ssl", "https")
srvErr := make([]string, 2)
if err != nil {
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err)
failCount += 1
failCount++
}
err = updateNodeMap("etcd-server", "http://")
err = updateNodeMap("etcd-server", "http")
if err != nil {
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err)
failCount += 1
failCount++
}
if failCount == 2 {
plog.Warningf(srvErr[0])
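
A hedged sketch of the same SRV-based bootstrap using the standard library directly; the domain is an assumption, and the service names mirror the _etcd-server-ssl/_etcd-server records the discovery code queries:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// net.LookupSRV queries _etcd-server._tcp.example.com.
	_, addrs, err := net.LookupSRV("etcd-server", "tcp", "example.com")
	if err != nil {
		fmt.Println("SRV lookup failed:", err)
		return
	}
	parts := []string{}
	for i, srv := range addrs {
		// SRV targets carry a trailing dot that URLs should not.
		host := strings.TrimSuffix(srv.Target, ".")
		parts = append(parts, fmt.Sprintf("%d=http://%s:%d", i, host, srv.Port))
	}
	fmt.Println("initial-cluster:", strings.Join(parts, ","))
}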

5
vendor/github.com/coreos/etcd/etcdserver/api/capability.go
generated
vendored
@@ -30,15 +30,14 @@ const (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "api")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")

// capabilityMaps is a static map of version to capability map.
// the base capabilities is the set of capability 2.0 supports.
capabilityMaps = map[string]map[Capability]bool{
"2.1.0": {AuthCapability: true},
"2.2.0": {AuthCapability: true},
"2.3.0": {AuthCapability: true},
"3.0.0": {AuthCapability: true, V3rpcCapability: true},
"3.1.0": {AuthCapability: true, V3rpcCapability: true},
}

enableMapMu sync.RWMutex

104
vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
generated
vendored
@@ -21,7 +21,6 @@ import (
"fmt"
"io/ioutil"
"net/http"
"net/http/pprof"
"net/url"
"path"
"strconv"
@@ -57,7 +56,6 @@ const (
healthPath = "/health"
versionPath = "/version"
configPath = "/config"
pprofPrefix = "/debug/pprof"
)

// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
@@ -113,23 +111,6 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
mux.Handle(deprecatedMachinesPrefix, dmh)
handleAuth(mux, sech)

if server.IsPprofEnabled() {
plog.Infof("pprof is enabled under %s", pprofPrefix)

mux.HandleFunc(pprofPrefix, pprof.Index)
mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
// TODO: currently, we don't create an entry for pprof.Trace,
// because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
// we should add the entry.

mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
}

return requestLogger(mux)
}

@@ -153,7 +134,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer cancel()
clock := clockwork.NewRealClock()
startTime := clock.Now()
rr, err := parseKeyRequest(r, clock)
rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
if err != nil {
writeKeyError(w, err)
return
@@ -175,7 +156,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
switch {
case resp.Event != nil:
if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
if err := writeKeyEvent(w, resp.Event, noValueOnSuccess, h.timer); err != nil {
// Should never be reached
plog.Errorf("error writing event (%v)", err)
}
@@ -365,32 +346,23 @@ func serveVars(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "\n}\n")
}

// TODO: change etcdserver to raft interface when we have it.
// add test for healthHandler when we have the interface ready.
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}

if uint64(server.Leader()) == raft.None {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}

// wait for raft's progress
index := server.Index()
for i := 0; i < 3; i++ {
time.Sleep(250 * time.Millisecond)
if server.Index() > index {
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"health": "true"}`))
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}

http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"health": "true"}`))
}
}
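
A minimal probe of the rewritten /health endpoint; the member address is an assumption:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:2379/health")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// A healthy member answers 200 with {"health": "true"};
	// an unhealthy one answers 503 with {"health": "false"}.
	fmt.Println(resp.StatusCode, string(body))
}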

@@ -449,19 +421,20 @@ func logHandleFunc(w http.ResponseWriter, r *http.Request) {
// parseKeyRequest converts a received http.Request on keysPrefix to
// a server Request, performing validation of supplied fields as appropriate.
// If any validation fails, an empty Request and non-nil error is returned.
func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, error) {
func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
noValueOnSuccess := false
emptyReq := etcdserverpb.Request{}

err := r.ParseForm()
if err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidForm,
err.Error(),
)
}

if !strings.HasPrefix(r.URL.Path, keysPrefix) {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidForm,
"incorrect key prefix",
)
@@ -470,13 +443,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

var pIdx, wIdx uint64
if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeIndexNaN,
`invalid value for "prevIndex"`,
)
}
if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeIndexNaN,
`invalid value for "waitIndex"`,
)
@@ -484,45 +457,45 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

var rec, sort, wait, dir, quorum, stream bool
if rec, err = getBool(r.Form, "recursive"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "recursive"`,
)
}
if sort, err = getBool(r.Form, "sorted"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "sorted"`,
)
}
if wait, err = getBool(r.Form, "wait"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "wait"`,
)
}
// TODO(jonboulle): define what parameters dir is/isn't compatible with?
if dir, err = getBool(r.Form, "dir"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "dir"`,
)
}
if quorum, err = getBool(r.Form, "quorum"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "quorum"`,
)
}
if stream, err = getBool(r.Form, "stream"); err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "stream"`,
)
}

if wait && r.Method != "GET" {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`"wait" can only be used with GET requests`,
)
@@ -530,19 +503,26 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

pV := r.FormValue("prevValue")
if _, ok := r.Form["prevValue"]; ok && pV == "" {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodePrevValueRequired,
`"prevValue" cannot be empty`,
)
}

if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
`invalid value for "noValueOnSuccess"`,
)
}

// TTL is nullable, so leave it null if not specified
// or an empty string
var ttl *uint64
if len(r.FormValue("ttl")) > 0 {
i, err := getUint64(r.Form, "ttl")
if err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeTTLNaN,
`invalid value for "ttl"`,
)
@@ -555,7 +535,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
if _, ok := r.Form["prevExist"]; ok {
bv, err := getBool(r.Form, "prevExist")
if err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
"invalid value for prevExist",
)
@@ -568,7 +548,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
if _, ok := r.Form["refresh"]; ok {
bv, err := getBool(r.Form, "refresh")
if err != nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
"invalid value for refresh",
)
@@ -577,13 +557,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
if refresh != nil && *refresh {
val := r.FormValue("value")
if _, ok := r.Form["value"]; ok && val != "" {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeRefreshValue,
`A value was provided on a refresh`,
)
}
if ttl == nil {
return emptyReq, etcdErr.NewRequestError(
return emptyReq, false, etcdErr.NewRequestError(
etcdErr.EcodeRefreshTTLRequired,
`No TTL value set`,
)
@@ -621,13 +601,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
rr.Expiration = clock.Now().Add(expr).UnixNano()
}

return rr, nil
return rr, noValueOnSuccess, nil
}
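
A hedged sketch of exercising the new noValueOnSuccess flag from a client; the endpoint is an assumption, and passing the flag in the form body relies on ParseForm merging body and query parameters:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	form := url.Values{"value": {"bar"}, "noValueOnSuccess": {"true"}}
	req, _ := http.NewRequest("PUT", "http://localhost:2379/v2/keys/foo",
		strings.NewReader(form.Encode()))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// With noValueOnSuccess=true, node and prevNode are omitted from the
	// success response, shrinking the payload for large values.
	fmt.Println(string(body))
}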

// writeKeyEvent trims the prefix of key path in a single Event under
// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
// ResponseWriter, along with the appropriate headers.
func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTimer) error {
func writeKeyEvent(w http.ResponseWriter, ev *store.Event, noValueOnSuccess bool, rt etcdserver.RaftTimer) error {
if ev == nil {
return errors.New("cannot write empty Event!")
}
@@ -641,6 +621,12 @@ func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTim
}

ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
if noValueOnSuccess &&
(ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
ev.Action == store.Create || ev.Action == store.Update) {
ev.Node = nil
ev.PrevNode = nil
}
return json.NewEncoder(w).Encode(ev)
}

@@ -747,6 +733,10 @@ func trimErrorPrefix(err error, prefix string) error {

func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
ctype := r.Header.Get("Content-Type")
semicolonPosition := strings.Index(ctype, ";")
if semicolonPosition != -1 {
ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
}
if ctype != "application/json" {
writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
return false

4
vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
generated
vendored
@@ -35,7 +35,7 @@ const (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v2http")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
mlog = logutil.NewMergeLogger(plog)
)

@@ -60,7 +60,7 @@ func writeError(w http.ResponseWriter, r *http.Request, err error) {
}
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers:
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
mlog.MergeError(err)
default:
mlog.MergeErrorf("got unexpected response error (%v)", err)

2
vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
generated
vendored
@@ -22,7 +22,7 @@ import (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api/v2http", "httptypes")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
)

type HTTPError struct {

9
vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go
generated
vendored
@@ -26,14 +26,14 @@ import (

const (
peerMembersPrefix = "/members"
leasesPrefix = "/leases"
)

// NewPeerHandler generates an http.Handler to handle etcd peer requests.
func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
var lh http.Handler
if l := s.Lessor(); l != nil {
lh = leasehttp.NewHandler(l)
l := s.Lessor()
if l != nil {
lh = leasehttp.NewHandler(l, func() <-chan struct{} { return s.ApplyWait() })
}
return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
}
@@ -49,7 +49,8 @@ func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler
mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
mux.Handle(peerMembersPrefix, mh)
if leaseHandler != nil {
mux.Handle(leasesPrefix, leaseHandler)
mux.Handle(leasehttp.LeasePrefix, leaseHandler)
mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
}
mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
return mux

3
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
generated
vendored
@@ -19,14 +19,13 @@ import (

"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/pkg/capnslog"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
)

func init() {
grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
grpclog.SetLogger(plog)
}

func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {

42
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
generated
vendored
@@ -15,7 +15,6 @@
package v3rpc

import (
"strings"
"sync"
"time"

@@ -25,6 +24,7 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"

prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
@@ -53,7 +53,8 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
}
}
}
return metricsUnaryInterceptor(ctx, req, info, handler)

return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
}
}

@@ -88,44 +89,11 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor

}
}
return metricsStreamInterceptor(srv, ss, info, handler)

return prometheus.StreamServerInterceptor(srv, ss, info, handler)
}
}
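
etcd now delegates its gRPC request metrics to go-grpc-prometheus instead of hand-rolled counters. A hedged sketch of wiring that library into a server; service registration and the listen address are assumptions:

package main

import (
	"net"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	s := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)
	// Register gRPC services here, then expose metrics via an HTTP handler.
	grpc_prometheus.Register(s) // initializes per-method metric labels

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		panic(err)
	}
	s.Serve(lis)
}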

func metricsUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
service, method := splitMethodName(info.FullMethod)
receivedCounter.WithLabelValues(service, method).Inc()

start := time.Now()
resp, err = handler(ctx, req)
if err != nil {
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
}
handlingDuration.WithLabelValues(service, method).Observe(time.Since(start).Seconds())

return resp, err
}

func metricsStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
service, method := splitMethodName(info.FullMethod)
receivedCounter.WithLabelValues(service, method).Inc()

err := handler(srv, ss)
if err != nil {
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
}

return err
}

func splitMethodName(fullMethodName string) (string, string) {
fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
if i := strings.Index(fullMethodName, "/"); i >= 0 {
return fullMethodName[:i], fullMethodName[i+1:]
}
return "unknown", "unknown"
}

type serverStreamWithCtx struct {
grpc.ServerStream
ctx context.Context

10
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
generated
vendored
@@ -26,7 +26,7 @@ import (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v3rpc")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")

// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
// and Txn.Failure can have at most 128 operations.
@@ -56,7 +56,7 @@ func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResp
plog.Panic("unexpected nil resp.Header")
}
s.hdr.fill(resp.Header)
return resp, err
return resp, nil
}

func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
@@ -73,7 +73,7 @@ func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse,
plog.Panic("unexpected nil resp.Header")
}
s.hdr.fill(resp.Header)
return resp, err
return resp, nil
}

func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
@@ -90,7 +90,7 @@ func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*
plog.Panic("unexpected nil resp.Header")
}
s.hdr.fill(resp.Header)
return resp, err
return resp, nil
}

func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
@@ -107,7 +107,7 @@ func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse,
plog.Panic("unexpected nil resp.Header")
}
s.hdr.fill(resp.Header)
return resp, err
return resp, nil
}

func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {

20
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
generated
vendored
@@ -18,7 +18,6 @@ import (
"io"

"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"golang.org/x/net/context"
@@ -35,20 +34,27 @@ func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {

func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
resp, err := ls.le.LeaseGrant(ctx, cr)
if err == lease.ErrLeaseExists {
return nil, rpctypes.ErrGRPCLeaseExist
}

if err != nil {
return nil, err
return nil, togRPCError(err)
}
ls.hdr.fill(resp.Header)
return resp, err
return resp, nil
}

func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
resp, err := ls.le.LeaseRevoke(ctx, rr)
if err != nil {
return nil, rpctypes.ErrGRPCLeaseNotFound
return nil, togRPCError(err)
}
ls.hdr.fill(resp.Header)
return resp, nil
}

func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
resp, err := ls.le.LeaseTimeToLive(ctx, rr)
if err != nil {
return nil, togRPCError(err)
}
ls.hdr.fill(resp.Header)
return resp, nil

54
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
generated
vendored
@@ -18,6 +18,7 @@ import (
"crypto/sha256"
"io"

"github.com/coreos/etcd/auth"
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc"
@@ -45,6 +46,10 @@ type RaftStatusGetter interface {
Leader() types.ID
}

type AuthGetter interface {
AuthStore() auth.AuthStore
}

type maintenanceServer struct {
rg RaftStatusGetter
kg KVGetter
@@ -54,7 +59,8 @@ type maintenanceServer struct {
}

func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
return &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
return &authMaintenanceServer{srv, s}
}

func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
@@ -139,3 +145,49 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (
ms.hdr.fill(resp.Header)
return resp, nil
}

type authMaintenanceServer struct {
*maintenanceServer
ag AuthGetter
}

func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx)
if err != nil {
return err
}

return ams.ag.AuthStore().IsAdminPermitted(authInfo)
}

func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
if err := ams.isAuthenticated(ctx); err != nil {
return nil, err
}

return ams.maintenanceServer.Defragment(ctx, sr)
}

func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
if err := ams.isAuthenticated(srv.Context()); err != nil {
return err
}

return ams.maintenanceServer.Snapshot(sr, srv)
}

func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
if err := ams.isAuthenticated(ctx); err != nil {
return nil, err
}

return ams.maintenanceServer.Hash(ctx, r)
}

func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
if err := ams.isAuthenticated(ctx); err != nil {
return nil, err
}

return ams.maintenanceServer.Status(ctx, ar)
}

34
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
generated
vendored
@@ -24,8 +24,6 @@ import (
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)

type ClusterServer struct {
@@ -50,14 +48,8 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)

now := time.Now()
m := membership.NewMember("", urls, "", &now)
err = cs.server.AddMember(ctx, *m)
switch {
case err == membership.ErrIDExists:
return nil, rpctypes.ErrGRPCMemberExist
case err == membership.ErrPeerURLexists:
return nil, rpctypes.ErrGRPCPeerURLExist
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
if err = cs.server.AddMember(ctx, *m); err != nil {
return nil, togRPCError(err)
}

return &pb.MemberAddResponse{
@@ -67,16 +59,9 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
}

func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
err := cs.server.RemoveMember(ctx, r.ID)
switch {
case err == membership.ErrIDRemoved:
fallthrough
case err == membership.ErrIDNotFound:
return nil, rpctypes.ErrGRPCMemberNotFound
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
if err := cs.server.RemoveMember(ctx, r.ID); err != nil {
return nil, togRPCError(err)
}

return &pb.MemberRemoveResponse{Header: cs.header()}, nil
}

@@ -85,16 +70,9 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
ID: types.ID(r.ID),
RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
}
err := cs.server.UpdateMember(ctx, m)
switch {
case err == membership.ErrPeerURLexists:
return nil, rpctypes.ErrGRPCPeerURLExist
case err == membership.ErrIDNotFound:
return nil, rpctypes.ErrGRPCMemberNotFound
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
if err := cs.server.UpdateMember(ctx, m); err != nil {
return nil, togRPCError(err)
}

return &pb.MemberUpdateResponse{Header: cs.header()}, nil
}

29
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
generated
vendored
@@ -17,31 +17,6 @@ package v3rpc

import "github.com/prometheus/client_golang/prometheus"

var (
receivedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "requests_total",
Help: "Counter of received requests.",
}, []string{"grpc_service", "grpc_method"})

failedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "requests_failed_total",
Help: "Counter of failed requests.",
}, []string{"grpc_service", "grpc_method", "grpc_code"})

handlingDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "unary_requests_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled unary (non-stream) requests.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"grpc_service", "grpc_method"})

sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "network",
@@ -58,10 +33,6 @@ var (
)

func init() {
prometheus.MustRegister(receivedCounter)
prometheus.MustRegister(failedCounter)
prometheus.MustRegister(handlingDuration)

prometheus.MustRegister(sentBytes)
prometheus.MustRegister(receivedBytes)
}

45
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
@@ -31,23 +31,28 @@ var (
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")

ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")

ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")

ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty")
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")

ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
@@ -68,16 +73,19 @@ var (
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,

grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,

grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,

grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
grpc.ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
@@ -85,6 +93,8 @@ var (
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,

grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
@@ -106,16 +116,19 @@ var (
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)

ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)

ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)

ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
ErrUserEmpty = Error(ErrGRPCUserEmpty)
ErrUserNotFound = Error(ErrGRPCUserNotFound)
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
@@ -123,6 +136,8 @@ var (
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)

ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)

30
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
generated
vendored
@@ -18,6 +18,7 @@ import (
"github.com/coreos/etcd/auth"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc"
"google.golang.org/grpc"
@@ -26,17 +27,29 @@ import (

func togRPCError(err error) error {
switch err {
case membership.ErrIDRemoved:
return rpctypes.ErrGRPCMemberNotFound
case membership.ErrIDNotFound:
return rpctypes.ErrGRPCMemberNotFound
case membership.ErrIDExists:
return rpctypes.ErrGRPCMemberExist
case membership.ErrPeerURLexists:
return rpctypes.ErrGRPCPeerURLExist
case etcdserver.ErrNotEnoughStartedMembers:
return rpctypes.ErrMemberNotEnoughStarted

case mvcc.ErrCompacted:
return rpctypes.ErrGRPCCompacted
case mvcc.ErrFutureRev:
return rpctypes.ErrGRPCFutureRev
case lease.ErrLeaseNotFound:
return rpctypes.ErrGRPCLeaseNotFound
// TODO: handle error from raft and timeout
case etcdserver.ErrRequestTooLarge:
return rpctypes.ErrGRPCRequestTooLarge
case etcdserver.ErrNoSpace:
return rpctypes.ErrGRPCNoSpace
case etcdserver.ErrTooManyRequests:
return rpctypes.ErrTooManyRequests

case etcdserver.ErrNoLeader:
return rpctypes.ErrGRPCNoLeader
@@ -48,6 +61,13 @@ func togRPCError(err error) error {
return rpctypes.ErrGRPCTimeoutDueToLeaderFail
case etcdserver.ErrTimeoutDueToConnectionLost:
return rpctypes.ErrGRPCTimeoutDueToConnectionLost
case etcdserver.ErrUnhealthy:
return rpctypes.ErrGRPCUnhealthy

case lease.ErrLeaseNotFound:
return rpctypes.ErrGRPCLeaseNotFound
case lease.ErrLeaseExists:
return rpctypes.ErrGRPCLeaseExist

case auth.ErrRootUserNotExist:
return rpctypes.ErrGRPCRootUserNotExist
@@ -55,6 +75,8 @@ func togRPCError(err error) error {
return rpctypes.ErrGRPCRootRoleNotExist
case auth.ErrUserAlreadyExist:
return rpctypes.ErrGRPCUserAlreadyExist
case auth.ErrUserEmpty:
return rpctypes.ErrGRPCUserEmpty
case auth.ErrUserNotFound:
return rpctypes.ErrGRPCUserNotFound
case auth.ErrRoleAlreadyExist:
@@ -69,7 +91,11 @@ func togRPCError(err error) error {
return rpctypes.ErrGRPCRoleNotGranted
case auth.ErrPermissionNotGranted:
return rpctypes.ErrGRPCPermissionNotGranted
case auth.ErrAuthNotEnabled:
return rpctypes.ErrGRPCAuthNotEnabled
case auth.ErrInvalidAuthToken:
return rpctypes.ErrGRPCInvalidAuthToken
default:
return grpc.Errorf(codes.Internal, err.Error())
return grpc.Errorf(codes.Unknown, err.Error())
}
}
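
On the client side, rpctypes.Error maps the gRPC error strings produced by togRPCError back to typed errors. A hedged sketch of checking two of the member-reconfiguration cases; the endpoint and peer URL are assumptions:

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	_, err = cli.MemberAdd(context.Background(), []string{"http://10.0.0.5:2380"})
	switch rpctypes.Error(err) {
	case nil:
		fmt.Println("member added")
	case rpctypes.ErrPeerURLExist:
		fmt.Println("peer URL already registered")
	case rpctypes.ErrMemberNotEnoughStarted:
		fmt.Println("not enough started members for reconfiguration")
	default:
		fmt.Println("member add failed:", err)
	}
}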

36
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
generated
vendored
@@ -92,6 +92,7 @@ type serverWatchStream struct {
mu sync.Mutex
// progress tracks the watchID that stream might need to send
// progress to.
// TODO: combine progress and prevKV into a single struct?
progress map[mvcc.WatchID]bool
prevKV map[mvcc.WatchID]bool

@@ -130,10 +131,14 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
// but when stream.Context().Done() is closed, the stream's recv
// may continue to block since it uses a different context, leading to
// deadlock when calling sws.close().
go func() { errc <- sws.recvLoop() }()

go func() {
if rerr := sws.recvLoop(); rerr != nil {
errc <- rerr
}
}()
select {
case err = <-errc:
close(sws.ctrlStream)
case <-stream.Context().Done():
err = stream.Context().Err()
// the only server-side cancellation is noleader for now.
@@ -146,7 +151,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
}

func (sws *serverWatchStream) recvLoop() error {
defer close(sws.ctrlStream)
for {
req, err := sws.gRPCStream.Recv()
if err == io.EOF {
@@ -171,12 +175,14 @@ func (sws *serverWatchStream) recvLoop() error {
// support >= key queries
creq.RangeEnd = []byte{}
}
filters := FiltersFromRequest(creq)

wsrev := sws.watchStream.Rev()
rev := creq.StartRevision
if rev == 0 {
rev = wsrev + 1
}
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
if id != -1 {
sws.mu.Lock()
if creq.ProgressNotify {
@@ -353,3 +359,25 @@ func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
RaftTerm: sws.raftTimer.Term(),
}
}

func filterNoDelete(e mvccpb.Event) bool {
return e.Type == mvccpb.DELETE
}

func filterNoPut(e mvccpb.Event) bool {
return e.Type == mvccpb.PUT
}

func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
for _, ft := range creq.Filters {
switch ft {
case pb.WatchCreateRequest_NOPUT:
filters = append(filters, filterNoPut)
case pb.WatchCreateRequest_NODELETE:
filters = append(filters, filterNoDelete)
default:
}
}
return filters
}
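
The server-side filters above are predicates where returning true means "drop this event". A small standalone sketch of the same predicate style, with illustrative types rather than the vendored ones:

package main

import "fmt"

type eventType int

const (
	put eventType = iota
	del
)

type event struct{ typ eventType }

type filterFunc func(event) bool

// apply keeps only events that no filter rejects.
func apply(evs []event, filters []filterFunc) []event {
	out := []event{}
	for _, ev := range evs {
		dropped := false
		for _, f := range filters {
			if f(ev) { // true means "filter this event out"
				dropped = true
				break
			}
		}
		if !dropped {
			out = append(out, ev)
		}
	}
	return out
}

func main() {
	noPut := func(ev event) bool { return ev.typ == put }
	evs := []event{{put}, {del}, {put}}
	fmt.Println(len(apply(evs, []filterFunc{noPut}))) // 1: only the delete survives
}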

100
vendor/github.com/coreos/etcd/etcdserver/apply.go
generated
vendored
@@ -35,7 +35,7 @@ const (
// to apply functions instead of a valid txn ID.
noTxn = -1

warnApplyDuration = 10 * time.Millisecond
warnApplyDuration = 100 * time.Millisecond
)

type applyResult struct {
@@ -258,7 +258,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
}

limit := r.Limit
if r.SortOrder != pb.RangeRequest_NONE {
if r.SortOrder != pb.RangeRequest_NONE ||
r.MinModRevision != 0 || r.MaxModRevision != 0 ||
r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
// fetch everything; sort and truncate afterwards
limit = 0
}
@@ -285,7 +287,31 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
}
}

if r.SortOrder != pb.RangeRequest_NONE {
if r.MaxModRevision != 0 {
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
pruneKVs(rr, f)
}
if r.MinModRevision != 0 {
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
pruneKVs(rr, f)
}
if r.MaxCreateRevision != 0 {
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
pruneKVs(rr, f)
}
if r.MinCreateRevision != 0 {
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
pruneKVs(rr, f)
}

sortOrder := r.SortOrder
if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
// Since current mvcc.Range implementation returns results
// sorted by keys in lexiographically ascending order,
// sort ASCEND by default only when target is not 'KEY'
sortOrder = pb.RangeRequest_ASCEND
}
if sortOrder != pb.RangeRequest_NONE {
var sorter sort.Interface
switch {
case r.SortTarget == pb.RangeRequest_KEY:
@@ -300,9 +326,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
sorter = &kvSortByValue{&kvSort{rr.KVs}}
}
switch {
case r.SortOrder == pb.RangeRequest_ASCEND:
case sortOrder == pb.RangeRequest_ASCEND:
sort.Sort(sorter)
case r.SortOrder == pb.RangeRequest_DESCEND:
case sortOrder == pb.RangeRequest_DESCEND:
sort.Sort(sort.Reverse(sorter))
}
}
@@ -345,34 +371,23 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
return nil, err
}

revision := a.s.KV().Rev()

// When executing the operations of txn, we need to hold the txn lock.
// So the reader will not see any intermediate results.
txnID := a.s.KV().TxnBegin()
defer func() {
err := a.s.KV().TxnEnd(txnID)
if err != nil {
panic(fmt.Sprint("unexpected error when closing txn", txnID))
}
}()

resps := make([]*pb.ResponseOp, len(reqs))
changedKV := false
for i := range reqs {
if reqs[i].GetRequestRange() == nil {
changedKV = true
}
resps[i] = a.applyUnion(txnID, reqs[i])
}

if changedKV {
revision += 1
err := a.s.KV().TxnEnd(txnID)
if err != nil {
panic(fmt.Sprint("unexpected error when closing txn", txnID))
}

txnResp := &pb.TxnResponse{}
txnResp.Header = &pb.ResponseHeader{}
txnResp.Header.Revision = revision
txnResp.Header.Revision = a.s.KV().Rev()
txnResp.Responses = resps
txnResp.Succeeded = ok
return txnResp, nil
@@ -436,6 +451,10 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
if result != 0 {
return rev, false
}
case pb.Compare_NOT_EQUAL:
if result == 0 {
return rev, false
}
case pb.Compare_GREATER:
if result != 1 {
return rev, false
@@ -454,7 +473,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
if tv.RequestRange != nil {
resp, err := a.Range(txnID, tv.RequestRange)
if err != nil {
panic("unexpected error during txn")
plog.Panicf("unexpected error during txn: %v", err)
}
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
}
@@ -462,7 +481,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
if tv.RequestPut != nil {
resp, err := a.Put(txnID, tv.RequestPut)
if err != nil {
panic("unexpected error during txn")
plog.Panicf("unexpected error during txn: %v", err)
}
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
}
@@ -470,7 +489,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
if tv.RequestDeleteRange != nil {
resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
if err != nil {
panic("unexpected error during txn")
plog.Panicf("unexpected error during txn: %v", err)
}
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
}
@@ -500,7 +519,7 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
resp := &pb.LeaseGrantResponse{}
if err == nil {
resp.ID = int64(l.ID)
resp.TTL = l.TTL
resp.TTL = l.TTL()
resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
}

@@ -784,3 +803,36 @@ func compareInt64(a, b int64) int {
func isGteRange(rangeEnd []byte) bool {
return len(rangeEnd) == 1 && rangeEnd[0] == 0
}

func noSideEffect(r *pb.InternalRaftRequest) bool {
return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
}

func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
f := func(ops []*pb.RequestOp) []*pb.RequestOp {
j := 0
for i := 0; i < len(ops); i++ {
if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
continue
}
ops[j] = ops[i]
j++
}

return ops[:j]
}

txn.Success = f(txn.Success)
txn.Failure = f(txn.Failure)
}

func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
j := 0
for i := range rr.KVs {
rr.KVs[j] = rr.KVs[i]
if !isPrunable(&rr.KVs[i]) {
j++
}
}
rr.KVs = rr.KVs[:j]
}
|
||||
|
||||
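
The new pruneKVs helper compacts a slice in place instead of allocating a filtered copy. The same two-index pattern works for any slice; a standalone version over ints:

package main

import "fmt"

// prune keeps elements for which isPrunable is false, reusing the backing array.
func prune(xs []int, isPrunable func(int) bool) []int {
	j := 0
	for i := range xs {
		xs[j] = xs[i]
		if !isPrunable(xs[i]) {
			j++
		}
	}
	return xs[:j]
}

func main() {
	xs := []int{1, 2, 3, 4, 5}
	fmt.Println(prune(xs, func(x int) bool { return x%2 == 0 })) // [1 3 5]
}
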
100
vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
generated
vendored
@@ -27,8 +27,9 @@ type authApplierV3 struct {

// mu serializes Apply so that user isn't corrupted and so that
// serialized requests don't leak data from TOCTOU errors
mu sync.Mutex
user string
mu sync.Mutex

authInfo auth.AuthInfo
}

func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
@@ -41,45 +42,57 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
if r.Header != nil {
// backward-compatible with pre-3.0 releases when internalRaftRequest
// does not have header field
aa.user = r.Header.Username
aa.authInfo.Username = r.Header.Username
aa.authInfo.Revision = r.Header.AuthRevision
}
if needAdminPermission(r) && !aa.as.IsAdminPermitted(aa.user) {
aa.user = ""
return &applyResult{err: auth.ErrPermissionDenied}
if needAdminPermission(r) {
if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
aa.authInfo.Username = ""
aa.authInfo.Revision = 0
return &applyResult{err: err}
}
}
ret := aa.applierV3.Apply(r)
aa.user = ""
aa.authInfo.Username = ""
aa.authInfo.Revision = 0
return ret
}

func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
if !aa.as.IsPutPermitted(aa.user, r.Key) {
return nil, auth.ErrPermissionDenied
if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
return nil, err
}
if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, nil) {
return nil, auth.ErrPermissionDenied
if r.PrevKv {
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
if err != nil {
return nil, err
}
}
return aa.applierV3.Put(txnID, r)
}

func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
return nil, err
}
return aa.applierV3.Range(txnID, r)
}

func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
if !aa.as.IsDeleteRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
return nil, err
}
if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
if r.PrevKv {
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
if err != nil {
return nil, err
}
}

return aa.applierV3.DeleteRange(txnID, r)
}

func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
for _, requ := range reqs {
switch tv := requ.Request.(type) {
case *pb.RequestOp_RequestRange:
@@ -87,8 +100,8 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
continue
}

if !aa.as.IsRangePermitted(aa.user, tv.RequestRange.Key, tv.RequestRange.RangeEnd) {
return false
if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
return err
}

case *pb.RequestOp_RequestPut:
@@ -96,8 +109,8 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
continue
}

if !aa.as.IsPutPermitted(aa.user, tv.RequestPut.Key) {
return false
if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
return err
}

case *pb.RequestOp_RequestDeleteRange:
@@ -105,29 +118,42 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
continue
}

if tv.RequestDeleteRange.PrevKv && !aa.as.IsRangePermitted(aa.user, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) {
return false
if tv.RequestDeleteRange.PrevKv {
err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
if err != nil {
return err
}
}

err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
if err != nil {
return err
}
}
}

return true
return nil
}

func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
for _, c := range rt.Compare {
if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
return err
}
}
if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
return err
}
if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
return err
}
return nil
}

func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
for _, c := range rt.Compare {
if !aa.as.IsRangePermitted(aa.user, c.Key, nil) {
return nil, auth.ErrPermissionDenied
}
if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
return nil, err
}

if !aa.checkTxnReqsPermission(rt.Success) {
return nil, auth.ErrPermissionDenied
}
if !aa.checkTxnReqsPermission(rt.Failure) {
return nil, auth.ErrPermissionDenied
}

return aa.applierV3.Txn(rt)
}
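
apply_auth.go wraps a base applier and checks permissions before delegating, the classic decorator pattern that this diff migrates from a bare username to a full AuthInfo. A simplified sketch with stand-in interfaces, not etcd's real applierV3 or auth store:

package main

import (
	"errors"
	"fmt"
)

var errPermissionDenied = errors.New("permission denied")

type applier interface {
	Put(key, val string) error
}

type backend struct{}

func (backend) Put(key, val string) error { fmt.Println("put", key); return nil }

// authApplier refuses operations the caller is not permitted to perform,
// then delegates everything else to the wrapped applier.
type authApplier struct {
	base    applier
	allowed map[string]bool
}

func (a *authApplier) Put(key, val string) error {
	if !a.allowed[key] {
		return errPermissionDenied
	}
	return a.base.Put(key, val)
}

func main() {
	a := &authApplier{base: backend{}, allowed: map[string]bool{"ok": true}}
	fmt.Println(a.Put("ok", "v"))   // put ok; <nil>
	fmt.Println(a.Put("deny", "v")) // permission denied
}
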
96
vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
generated
vendored
@@ -46,7 +46,7 @@ const (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "auth")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
)

var rootRole = Role{
@@ -167,7 +167,7 @@ func (_ passwordStore) HashPassword(password string) (string, error) {
}

func (s *store) AllUsers() ([]string, error) {
resp, err := s.requestResource("/users/", false)
resp, err := s.requestResource("/users/", false, false)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -185,33 +185,13 @@ func (s *store) AllUsers() ([]string, error) {
return nodes, nil
}

func (s *store) GetUser(name string) (User, error) {
resp, err := s.requestResource("/users/"+name, false)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
}
}
return User{}, err
}
var u User
err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
if err != nil {
return u, err
}
// Attach root role to root user.
if u.User == "root" {
u = attachRootRole(u)
}
return u, nil
}
func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }

// CreateOrUpdateUser should be only used for creating the new user or when you are not
// sure if it is a create or update. (When only password is passed in, we are not sure
// if it is a update or create)
func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
_, err = s.GetUser(user.User)
_, err = s.getUser(user.User, true)
if err == nil {
out, err = s.UpdateUser(user)
return out, false, err
@@ -271,7 +251,7 @@ func (s *store) DeleteUser(name string) error {
}

func (s *store) UpdateUser(user User) (User, error) {
old, err := s.GetUser(user.User)
old, err := s.getUser(user.User, true)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -297,7 +277,7 @@ func (s *store) UpdateUser(user User) (User, error) {

func (s *store) AllRoles() ([]string, error) {
nodes := []string{RootRoleName}
resp, err := s.requestResource("/roles/", false)
resp, err := s.requestResource("/roles/", false, false)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -314,23 +294,7 @@ func (s *store) AllRoles() ([]string, error) {
return nodes, nil
}

func (s *store) GetRole(name string) (Role, error) {
if name == RootRoleName {
return rootRole, nil
}
resp, err := s.requestResource("/roles/"+name, false)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
}
}
return Role{}, err
}
var r Role
err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
return r, err
}
func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }

func (s *store) CreateRole(role Role) error {
if role.Role == RootRoleName {
@@ -372,7 +336,7 @@ func (s *store) UpdateRole(role Role) (Role, error) {
if role.Role == RootRoleName {
return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
}
old, err := s.GetRole(role.Role)
old, err := s.getRole(role.Role, true)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -404,10 +368,10 @@ func (s *store) EnableAuth() error {
return authErr(http.StatusConflict, "already enabled")
}

if _, err := s.GetUser("root"); err != nil {
if _, err := s.getUser("root", true); err != nil {
return authErr(http.StatusConflict, "No root user available, please create one")
}
if _, err := s.GetRole(GuestRoleName); err != nil {
if _, err := s.getRole(GuestRoleName, true); err != nil {
plog.Printf("no guest role access found, creating default")
if err := s.CreateRole(guestRole); err != nil {
plog.Errorf("error creating guest role. aborting auth enable.")
@@ -641,3 +605,43 @@ func attachRootRole(u User) User {
}
return u
}

func (s *store) getUser(name string, quorum bool) (User, error) {
resp, err := s.requestResource("/users/"+name, false, quorum)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
}
}
return User{}, err
}
var u User
err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
if err != nil {
return u, err
}
// Attach root role to root user.
if u.User == "root" {
u = attachRootRole(u)
}
return u, nil
}

func (s *store) getRole(name string, quorum bool) (Role, error) {
if name == RootRoleName {
return rootRole, nil
}
resp, err := s.requestResource("/roles/"+name, false, quorum)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
}
}
return Role{}, err
}
var r Role
err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
return r, err
}
10
vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
generated
vendored
@@ -85,7 +85,7 @@ func (s *store) detectAuth() bool {
if s.server == nil {
return false
}
value, err := s.requestResource("/enabled", false)
value, err := s.requestResource("/enabled", false, false)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -105,12 +105,16 @@ func (s *store) detectAuth() bool {
return u
}

func (s *store) requestResource(res string, dir bool) (etcdserver.Response, error) {
func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
defer cancel()
p := path.Join(StorePermsPrefix, res)
method := "GET"
if quorum {
method = "QGET"
}
rr := etcdserverpb.Request{
Method: "GET",
Method: method,
Path: p,
Dir: dir,
}
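
The new quorum flag threads through to the v2 request method: QGET goes through raft and observes the latest committed state, while plain GET may serve a stale local view. The same flag-to-method mapping in isolation:

package main

import "fmt"

func method(quorum bool) string {
	if quorum {
		return "QGET" // linearizable read via raft
	}
	return "GET" // local read; may lag the leader
}

func main() {
	fmt.Println(method(false), method(true)) // GET QGET
}
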
11
vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
generated
vendored
@@ -94,7 +94,16 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
}
continue
}
return membership.NewClusterFromMembers("", id, membs), nil

// Check the fetched member list: if members are present, build and return
// the raft cluster. Otherwise the resulting cluster would be an invalid
// empty cluster, so return an error instead.
if len(membs) > 0 {
return membership.NewClusterFromMembers("", id, membs), nil
}

return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
}
return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
}
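
The guard above is small but load-bearing: an empty member list used to produce a structurally valid yet unusable cluster object. The same fail-fast check in isolation:

package main

import (
	"errors"
	"fmt"
)

// clusterFromMembers refuses to build a cluster from an empty member list.
func clusterFromMembers(membs []string) ([]string, error) {
	if len(membs) == 0 {
		return nil, errors.New("failed to get raft cluster member(s) from the given urls")
	}
	return membs, nil
}

func main() {
	_, err := clusterFromMembers(nil)
	fmt.Println(err)
}
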
59
vendor/github.com/coreos/etcd/etcdserver/config.go
generated
vendored
@@ -16,11 +16,13 @@ package etcdserver

import (
"fmt"
"path"
"path/filepath"
"sort"
"strings"
"time"

"golang.org/x/net/context"

"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
@@ -55,8 +57,6 @@ type ServerConfig struct {

StrictReconfigCheck bool

EnablePprof bool

// ClientCertAuthEnabled is true when cert has been signed by the client CA.
ClientCertAuthEnabled bool
}
@@ -64,7 +64,10 @@ type ServerConfig struct {
// VerifyBootstrap sanity-checks the initial config for bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
if err := c.verifyLocalMember(true); err != nil {
if err := c.hasLocalMember(); err != nil {
return err
}
if err := c.advertiseMatchesCluster(); err != nil {
return err
}
if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -79,10 +82,9 @@ func (c *ServerConfig) VerifyBootstrap() error {
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
// no need for strict checking since the member have announced its
// peer urls to the cluster before starting and do not have to set
// it in the configuration again.
if err := c.verifyLocalMember(false); err != nil {
// The member has announced its peer urls to the cluster before starting; no need to
// set the configuration again.
if err := c.hasLocalMember(); err != nil {
return err
}
if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -94,39 +96,38 @@ func (c *ServerConfig) VerifyJoinExisting() error {
return nil
}

// verifyLocalMember verifies the configured member is in configured
// cluster. If strict is set, it also verifies the configured member
// has the same peer urls as configured advertised peer urls.
func (c *ServerConfig) verifyLocalMember(strict bool) error {
urls := c.InitialPeerURLsMap[c.Name]
// Make sure the cluster at least contains the local server.
if urls == nil {
// hasLocalMember checks that the cluster at least contains the local server.
func (c *ServerConfig) hasLocalMember() error {
if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
}

// Advertised peer URLs must match those in the cluster peer list
apurls := c.PeerURLs.StringSlice()
sort.Strings(apurls)
urls.Sort()
if strict {
if !netutil.URLStringsEqual(apurls, urls.StringSlice()) {
umap := map[string]types.URLs{c.Name: c.PeerURLs}
return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
}
}
return nil
}

func (c *ServerConfig) MemberDir() string { return path.Join(c.DataDir, "member") }
// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
func (c *ServerConfig) advertiseMatchesCluster() error {
urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
urls.Sort()
sort.Strings(apurls)
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
umap := map[string]types.URLs{c.Name: c.PeerURLs}
return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
}
return nil
}

func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

func (c *ServerConfig) WALDir() string {
if c.DedicatedWALDir != "" {
return c.DedicatedWALDir
}
return path.Join(c.MemberDir(), "wal")
return filepath.Join(c.MemberDir(), "wal")
}

func (c *ServerConfig) SnapDir() string { return path.Join(c.MemberDir(), "snap") }
func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
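
config.go switches MemberDir, WALDir, and SnapDir from path.Join to filepath.Join. path.Join always uses forward slashes, while filepath.Join uses the host OS separator, which matters once data directories can live on Windows:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	fmt.Println(path.Join("data", "member", "wal"))     // data/member/wal on every OS
	fmt.Println(filepath.Join("data", "member", "wal")) // data\member\wal on Windows
}
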
4
vendor/github.com/coreos/etcd/etcdserver/errors.go
generated
vendored
@@ -26,11 +26,13 @@ var (
ErrTimeout = errors.New("etcdserver: request timed out")
ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
ErrNoLeader = errors.New("etcdserver: no leader")
ErrRequestTooLarge = errors.New("etcdserver: request is too large")
ErrNoSpace = errors.New("etcdserver: no space")
ErrInvalidAuthToken = errors.New("etcdserver: invalid auth token")
ErrTooManyRequests = errors.New("etcdserver: too many requests")
ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
)

type DiscoveryError struct {
340
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
generated
vendored
@@ -45,6 +45,8 @@
LeaseRevokeResponse
LeaseKeepAliveRequest
LeaseKeepAliveResponse
LeaseTimeToLiveRequest
LeaseTimeToLiveResponse
Member
MemberAddRequest
MemberAddResponse
@@ -113,26 +115,28 @@ var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Request struct {
ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD" json:"ID"`
Method string `protobuf:"bytes,2,opt,name=Method,json=method" json:"Method"`
Path string `protobuf:"bytes,3,opt,name=Path,json=path" json:"Path"`
Val string `protobuf:"bytes,4,opt,name=Val,json=val" json:"Val"`
Dir bool `protobuf:"varint,5,opt,name=Dir,json=dir" json:"Dir"`
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue,json=prevValue" json:"PrevValue"`
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex,json=prevIndex" json:"PrevIndex"`
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist,json=prevExist" json:"PrevExist,omitempty"`
Expiration int64 `protobuf:"varint,9,opt,name=Expiration,json=expiration" json:"Expiration"`
Wait bool `protobuf:"varint,10,opt,name=Wait,json=wait" json:"Wait"`
Since uint64 `protobuf:"varint,11,opt,name=Since,json=since" json:"Since"`
Recursive bool `protobuf:"varint,12,opt,name=Recursive,json=recursive" json:"Recursive"`
Sorted bool `protobuf:"varint,13,opt,name=Sorted,json=sorted" json:"Sorted"`
Quorum bool `protobuf:"varint,14,opt,name=Quorum,json=quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt,name=Time,json=time" json:"Time"`
Stream bool `protobuf:"varint,16,opt,name=Stream,json=stream" json:"Stream"`
Refresh *bool `protobuf:"varint,17,opt,name=Refresh,json=refresh" json:"Refresh,omitempty"`
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
XXX_unrecognized []byte `json:"-"`
}

@@ -142,8 +146,8 @@ func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }

type Metadata struct {
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID,json=nodeID" json:"NodeID"`
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID,json=clusterID" json:"ClusterID"`
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
XXX_unrecognized []byte `json:"-"`
}

@@ -156,182 +160,182 @@ func init() {
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
}
func (m *Request) Marshal() (data []byte, err error) {
func (m *Request) Marshal() (dAtA []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return data[:n], nil
return dAtA[:n], nil
}

func (m *Request) MarshalTo(data []byte) (int, error) {
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
dAtA[i] = 0x8
i++
i = encodeVarintEtcdserver(data, i, uint64(m.ID))
data[i] = 0x12
i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
dAtA[i] = 0x12
i++
i = encodeVarintEtcdserver(data, i, uint64(len(m.Method)))
i += copy(data[i:], m.Method)
data[i] = 0x1a
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
i += copy(dAtA[i:], m.Method)
dAtA[i] = 0x1a
i++
i = encodeVarintEtcdserver(data, i, uint64(len(m.Path)))
i += copy(data[i:], m.Path)
data[i] = 0x22
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
dAtA[i] = 0x22
i++
i = encodeVarintEtcdserver(data, i, uint64(len(m.Val)))
i += copy(data[i:], m.Val)
data[i] = 0x28
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
i += copy(dAtA[i:], m.Val)
dAtA[i] = 0x28
i++
if m.Dir {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
data[i] = 0x32
dAtA[i] = 0x32
i++
i = encodeVarintEtcdserver(data, i, uint64(len(m.PrevValue)))
i += copy(data[i:], m.PrevValue)
data[i] = 0x38
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
i += copy(dAtA[i:], m.PrevValue)
dAtA[i] = 0x38
i++
i = encodeVarintEtcdserver(data, i, uint64(m.PrevIndex))
i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
if m.PrevExist != nil {
data[i] = 0x40
dAtA[i] = 0x40
i++
if *m.PrevExist {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
}
data[i] = 0x48
dAtA[i] = 0x48
i++
i = encodeVarintEtcdserver(data, i, uint64(m.Expiration))
data[i] = 0x50
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
dAtA[i] = 0x50
i++
if m.Wait {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
data[i] = 0x58
dAtA[i] = 0x58
i++
i = encodeVarintEtcdserver(data, i, uint64(m.Since))
data[i] = 0x60
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
dAtA[i] = 0x60
i++
if m.Recursive {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
data[i] = 0x68
dAtA[i] = 0x68
i++
if m.Sorted {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
data[i] = 0x70
dAtA[i] = 0x70
i++
if m.Quorum {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
data[i] = 0x78
dAtA[i] = 0x78
i++
i = encodeVarintEtcdserver(data, i, uint64(m.Time))
data[i] = 0x80
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
dAtA[i] = 0x80
i++
data[i] = 0x1
dAtA[i] = 0x1
i++
if m.Stream {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
if m.Refresh != nil {
data[i] = 0x88
dAtA[i] = 0x88
i++
data[i] = 0x1
dAtA[i] = 0x1
i++
if *m.Refresh {
data[i] = 1
dAtA[i] = 1
} else {
data[i] = 0
dAtA[i] = 0
}
i++
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}

func (m *Metadata) Marshal() (data []byte, err error) {
func (m *Metadata) Marshal() (dAtA []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return data[:n], nil
return dAtA[:n], nil
}

func (m *Metadata) MarshalTo(data []byte) (int, error) {
func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
dAtA[i] = 0x8
i++
i = encodeVarintEtcdserver(data, i, uint64(m.NodeID))
data[i] = 0x10
i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
dAtA[i] = 0x10
i++
i = encodeVarintEtcdserver(data, i, uint64(m.ClusterID))
i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}

func encodeFixed64Etcdserver(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Etcdserver(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintEtcdserver(data []byte, offset int, v uint64) int {
func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Request) Size() (n int) {
@@ -392,8 +396,8 @@ func sovEtcdserver(x uint64) (n int) {
func sozEtcdserver(x uint64) (n int) {
return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Request) Unmarshal(data []byte) error {
l := len(data)
func (m *Request) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -405,7 +409,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -433,7 +437,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.ID |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -452,7 +456,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -467,7 +471,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Method = string(data[iNdEx:postIndex])
m.Method = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -481,7 +485,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -496,7 +500,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Path = string(data[iNdEx:postIndex])
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -510,7 +514,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -525,7 +529,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Val = string(data[iNdEx:postIndex])
m.Val = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 0 {
@@ -539,7 +543,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -559,7 +563,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -574,7 +578,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PrevValue = string(data[iNdEx:postIndex])
m.PrevValue = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 0 {
@@ -588,7 +592,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.PrevIndex |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -607,7 +611,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -628,7 +632,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Expiration |= (int64(b) & 0x7F) << shift
if b < 0x80 {
@@ -647,7 +651,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -667,7 +671,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Since |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -686,7 +690,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -706,7 +710,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -726,7 +730,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -746,7 +750,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Time |= (int64(b) & 0x7F) << shift
if b < 0x80 {
@@ -765,7 +769,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -785,7 +789,7 @@ func (m *Request) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -796,7 +800,7 @@ func (m *Request) Unmarshal(data []byte) error {
m.Refresh = &b
default:
iNdEx = preIndex
skippy, err := skipEtcdserver(data[iNdEx:])
skippy, err := skipEtcdserver(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -806,7 +810,7 @@ func (m *Request) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -816,8 +820,8 @@ func (m *Request) Unmarshal(data []byte) error {
}
return nil
}
func (m *Metadata) Unmarshal(data []byte) error {
l := len(data)
func (m *Metadata) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -829,7 +833,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -857,7 +861,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.NodeID |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -876,7 +880,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.ClusterID |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -885,7 +889,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
}
default:
iNdEx = preIndex
skippy, err := skipEtcdserver(data[iNdEx:])
skippy, err := skipEtcdserver(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -895,7 +899,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -905,8 +909,8 @@ func (m *Metadata) Unmarshal(data []byte) error {
}
return nil
}
func skipEtcdserver(data []byte) (n int, err error) {
l := len(data)
func skipEtcdserver(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
@@ -917,7 +921,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -935,7 +939,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
if dAtA[iNdEx-1] < 0x80 {
break
}
}
@@ -952,7 +956,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -975,7 +979,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -986,7 +990,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
if innerWireType == 4 {
break
}
next, err := skipEtcdserver(data[start:])
next, err := skipEtcdserver(dAtA[start:])
if err != nil {
return 0, err
}
@@ -1010,32 +1014,32 @@ var (
ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }

var fileDescriptorEtcdserver = []byte{
// 404 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x6e, 0x13, 0x31,
0x14, 0x86, 0xe3, 0xc4, 0x99, 0x64, 0x4c, 0x81, 0x62, 0x45, 0xe8, 0xa9, 0x42, 0x43, 0x14, 0xb1,
0xc8, 0x0a, 0xee, 0x50, 0xd2, 0x45, 0x24, 0x8a, 0x4a, 0x8a, 0xca, 0xda, 0x64, 0x1e, 0x8d, 0xa5,
0xcc, 0x78, 0x6a, 0xbf, 0x19, 0x72, 0x03, 0xae, 0xc0, 0x91, 0xb2, 0xe4, 0x04, 0x08, 0xc2, 0x45,
0x90, 0x3d, 0x9d, 0x60, 0xba, 0xb3, 0xbe, 0xff, 0xf7, 0xef, 0xdf, 0xf6, 0x13, 0xa7, 0x48, 0xeb,
0xdc, 0xa1, 0x6d, 0xd0, 0xbe, 0xae, 0xac, 0x21, 0x23, 0x4f, 0xfe, 0x91, 0xea, 0xf3, 0xd9, 0xe4,
0xd6, 0xdc, 0x9a, 0x20, 0xbc, 0xf1, 0xab, 0xd6, 0x33, 0xfb, 0xc6, 0xc5, 0x68, 0x85, 0x77, 0x35,
0x3a, 0x92, 0x13, 0xd1, 0x5f, 0x2e, 0x80, 0x4d, 0xd9, 0x9c, 0x9f, 0xf3, 0xfd, 0xcf, 0x97, 0xbd,
0x55, 0x5f, 0x2f, 0xe4, 0x0b, 0x91, 0x5c, 0x22, 0x6d, 0x4c, 0x0e, 0xfd, 0x29, 0x9b, 0xa7, 0xf7,
0x4a, 0x52, 0x04, 0x26, 0x41, 0xf0, 0x2b, 0x45, 0x1b, 0x18, 0x44, 0x1a, 0xaf, 0x14, 0x6d, 0xe4,
0x73, 0x31, 0xb8, 0x51, 0x5b, 0xe0, 0x91, 0x30, 0x68, 0xd4, 0xd6, 0xf3, 0x85, 0xb6, 0x30, 0x9c,
0xb2, 0xf9, 0xb8, 0xe3, 0xb9, 0xb6, 0x72, 0x26, 0xd2, 0x2b, 0x8b, 0xcd, 0x8d, 0xda, 0xd6, 0x08,
0x49, 0xb4, 0x2b, 0xad, 0x3a, 0xdc, 0x79, 0x96, 0x65, 0x8e, 0x3b, 0x18, 0x45, 0x45, 0x83, 0x27,
0xe0, 0xce, 0x73, 0xb1, 0xd3, 0x8e, 0x60, 0x7c, 0x3c, 0x85, 0xb5, 0x9e, 0x80, 0xe5, 0x2b, 0x21,
0x2e, 0x76, 0x95, 0xb6, 0x8a, 0xb4, 0x29, 0x21, 0x9d, 0xb2, 0xf9, 0xe0, 0x3e, 0x48, 0xe0, 0x91,
0xfb, 0xbb, 0x7d, 0x52, 0x9a, 0x40, 0x44, 0x55, 0xf9, 0x57, 0xa5, 0x49, 0x9e, 0x89, 0xe1, 0xb5,
0x2e, 0xd7, 0x08, 0x8f, 0xa2, 0x0e, 0x43, 0xe7, 0x91, 0x3f, 0x7f, 0x85, 0xeb, 0xda, 0x3a, 0xdd,
0x20, 0x9c, 0x44, 0x5b, 0x53, 0xdb, 0x61, 0xff, 0xa6, 0xd7, 0xc6, 0x12, 0xe6, 0xf0, 0x38, 0x32,
0x24, 0x2e, 0x30, 0xaf, 0x7e, 0xa8, 0x8d, 0xad, 0x0b, 0x78, 0x12, 0xab, 0x77, 0x81, 0xf9, 0x56,
0x1f, 0x75, 0x81, 0xf0, 0x34, 0x6a, 0xcd, 0x49, 0x17, 0x6d, 0x2a, 0x59, 0x54, 0x05, 0x9c, 0xfe,
0x97, 0x1a, 0x98, 0xcc, 0xfc, 0x47, 0x7f, 0xb1, 0xe8, 0x36, 0xf0, 0x2c, 0x7a, 0x95, 0x91, 0x6d,
0xe1, 0xec, 0x9d, 0x18, 0x5f, 0x22, 0xa9, 0x5c, 0x91, 0xf2, 0x49, 0xef, 0x4d, 0x8e, 0x0f, 0xa6,
0x21, 0x29, 0x03, 0xf3, 0x37, 0x7c, 0xbb, 0xad, 0x1d, 0xa1, 0x5d, 0x2e, 0xc2, 0x50, 0x1c, 0x7f,
0x61, 0xdd, 0xe1, 0xf3, 0xc9, 0xfe, 0x77, 0xd6, 0xdb, 0x1f, 0x32, 0xf6, 0xe3, 0x90, 0xb1, 0x5f,
0x87, 0x8c, 0x7d, 0xff, 0x93, 0xf5, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40,
0xa4, 0x02, 0x00, 0x00,
// 380 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
}
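
The encodeVarintEtcdserver helper renamed above (data to dAtA, a gogoproto change that avoids collisions with field names) emits standard protobuf varints: seven payload bits per byte, continuation bit set on all but the last byte. Extracted into a runnable form:

package main

import "fmt"

// encodeVarint writes v at buf[offset:] in protobuf varint encoding and
// returns the offset just past the written bytes.
func encodeVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("%x\n", buf[:n]) // ac02: low 7 bits first, then the remainder
}
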
608
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
generated
vendored
File diff suppressed because it is too large
2
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
generated
vendored
@@ -14,6 +14,8 @@ message RequestHeader {
uint64 ID = 1;
// username is a username that is associated with an auth token of gRPC connection
string username = 2;
// auth_revision is a revision number of auth.authStore. It is not related to mvcc
uint64 auth_revision = 3;
}

// An InternalRaftRequest is the union of all requests which can be
5435
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
File diff suppressed because it is too large
45
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go
generated
vendored
@@ -222,6 +222,19 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh
return stream, metadata, nil
}

func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq LeaseTimeToLiveRequest
var metadata runtime.ServerMetadata

if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err

}

func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq MemberAddRequest
var metadata runtime.ServerMetadata
@@ -935,6 +948,34 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc

})

mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, req)
if err != nil {
runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
}
resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
return
}

forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

return nil
}

@@ -944,6 +985,8 @@ var (
pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, ""))

pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, ""))

pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, ""))
)

var (
@@ -952,6 +995,8 @@ var (
forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage

forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream

forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
)

// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
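
The gateway registration above exposes LeaseTimeToLive over REST. Assuming a local etcd with the v3alpha grpc-gateway enabled on the client port, the handler can be exercised with a plain JSON POST (endpoint path taken from the pattern in this diff):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader(`{"ID": 1, "keys": true}`)
	resp, err := http.Post("http://127.0.0.1:2379/v3alpha/kv/lease/timetolive", "application/json", body)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(b)) // lease TTL, granted TTL, and attached keys as JSON
}
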
60
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
generated
vendored
@@ -104,8 +104,15 @@ service Lease {
};
}

// LeaseTimeToLive retrieves lease information.
rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/lease/timetolive"
body: "*"
};
}

// TODO(xiangli) List all existing Leases?
// TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease?
}

service Cluster {
@@ -375,6 +382,22 @@ message RangeRequest {

// count_only when set returns only the count of the keys in the range.
bool count_only = 9;

// min_mod_revision is the lower bound for returned key mod revisions; all keys with
// lesser mod revisions will be filtered away.
int64 min_mod_revision = 10;

// max_mod_revision is the upper bound for returned key mod revisions; all keys with
// greater mod revisions will be filtered away.
int64 max_mod_revision = 11;

// min_create_revision is the lower bound for returned key create revisions; all keys with
// lesser create revisions will be filtered away.
int64 min_create_revision = 12;

// max_create_revision is the upper bound for returned key create revisions; all keys with
// greater create revisions will be filtered away.
int64 max_create_revision = 13;
}

message RangeResponse {
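
The four new RangeRequest revision bounds surface in clientv3 as range options. Assuming this vendor bump is consumed through the clientv3 package from the same etcd 3.1 release, a server-side filtered range looks like:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer cli.Close()

	// Only keys under "foo" whose mod revision lies in [10, 20] are returned;
	// everything else is pruned server side via min/max_mod_revision.
	resp, err := cli.Get(context.Background(), "foo", clientv3.WithPrefix(),
		clientv3.WithMinModRev(10), clientv3.WithMaxModRev(20))
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s (mod rev %d)\n", kv.Key, kv.Value, kv.ModRevision)
	}
}
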
@@ -413,8 +436,11 @@ message DeleteRangeRequest {
|
||||
bytes key = 1;
|
||||
// range_end is the key following the last key to delete for the range [key, range_end).
|
||||
// If range_end is not given, the range is defined to contain only the key argument.
|
||||
// If range_end is one bit larger than the given key, then the range is all
|
||||
// the all keys with the prefix (the given key).
|
||||
// If range_end is '\0', the range is all keys greater than or equal to the key argument.
|
||||
bytes range_end = 2;
|
||||
|
||||
// If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
|
||||
// The previous key-value pairs will be returned in the delte response.
|
||||
bool prev_kv = 3;
|
||||
@@ -451,6 +477,7 @@ message Compare {
|
||||
EQUAL = 0;
|
||||
GREATER = 1;
|
||||
LESS = 2;
|
||||
NOT_EQUAL = 3;
|
||||
}
|
||||
enum CompareTarget {
|
||||
VERSION = 0;
|
||||
@@ -566,6 +593,8 @@ message WatchCreateRequest {
|
||||
// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
|
||||
// only the key argument is watched. If range_end is equal to '\0', all keys greater than
|
||||
// or equal to the key argument are watched.
|
||||
// If the range_end is one bit larger than the given key,
|
||||
// then all keys with the prefix (the given key) will be watched.
|
||||
bytes range_end = 2;
|
||||
// start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
|
||||
int64 start_revision = 3;
|
||||
@@ -574,6 +603,16 @@ message WatchCreateRequest {
|
||||
// wish to recover a disconnected watcher starting from a recent known revision.
|
||||
// The etcd server may decide how often it will send notifications based on current load.
|
||||
bool progress_notify = 4;
|
||||
|
||||
enum FilterType {
|
||||
// filter out put event.
|
||||
NOPUT = 0;
|
||||
// filter out delete event.
|
||||
NODELETE = 1;
|
||||
}
|
||||
// filters filter the events at server side before it sends back to the watcher.
|
||||
repeated FilterType filters = 5;
|
||||
|
||||
// If prev_kv is set, created watcher gets the previous KV before the event happens.
|
||||
// If the previous KV is already compacted, nothing will be returned.
|
||||
bool prev_kv = 6;
|
||||
@@ -647,6 +686,25 @@ message LeaseKeepAliveResponse {
|
||||
int64 TTL = 3;
|
||||
}
|
||||
|
||||
message LeaseTimeToLiveRequest {
|
||||
// ID is the lease ID for the lease.
|
||||
int64 ID = 1;
|
||||
// keys is true to query all the keys attached to this lease.
|
||||
bool keys = 2;
|
||||
}
|
||||
|
||||
message LeaseTimeToLiveResponse {
|
||||
ResponseHeader header = 1;
|
||||
// ID is the lease ID from the keep alive request.
|
||||
int64 ID = 2;
|
||||
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
|
||||
int64 TTL = 3;
|
||||
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
|
||||
int64 grantedTTL = 4;
|
||||
// Keys is the list of keys attached to this lease.
|
||||
repeated bytes keys = 5;
|
||||
}
|
||||
|
||||
message Member {
|
||||
// ID is the member ID for this member.
|
||||
uint64 ID = 1;
|
||||
|
||||
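The new filters and prev_kv fields appear in the Go client as watch options; a hedged sketch assuming the matching clientv3 (where WithFilterPut and WithPrevKV exist under these names), again reusing cli from above:

	func watchDeletes(cli *clientv3.Client) {
		wch := cli.Watch(context.Background(), "job/",
			clientv3.WithPrefix(),
			clientv3.WithFilterPut(), // FilterType NOPUT: drop put events server-side
			clientv3.WithPrevKV(),    // include the prior key-value pair, unless compacted
		)
		for wresp := range wch {
			for _, ev := range wresp.Events {
				if ev.PrevKv != nil {
					fmt.Printf("deleted %s (was %q)\n", ev.Kv.Key, ev.PrevKv.Value)
				}
			}
		}
	}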
7
vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
generated
vendored
@@ -24,6 +24,9 @@ import (
"sort"
"strings"
"sync"
"time"

"golang.org/x/net/context"

"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/netutil"

@@ -484,8 +487,10 @@ func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) erro
sort.Sort(MembersByPeerURLs(ems))
sort.Sort(MembersByPeerURLs(lms))

ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
for i := range ems {
if !netutil.URLStringsEqual(ems[i].PeerURLs, lms[i].PeerURLs) {
if !netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs) {
return fmt.Errorf("unmatched member while checking PeerURLs")
}
lms[i].ID = ems[i].ID

2
vendor/github.com/coreos/etcd/etcdserver/membership/member.go
generated
vendored
@@ -27,7 +27,7 @@ import (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "membership")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership")
)

// RaftAttributes represents the raft related attributes of an etcd member.

2
vendor/github.com/coreos/etcd/etcdserver/metrics.go
generated
vendored
@@ -52,7 +52,7 @@ var (
Name: "proposals_pending",
Help: "The current number of pending proposals to commit.",
})
proposalsFailed = prometheus.NewGauge(prometheus.GaugeOpts{
proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "server",
Name: "proposals_failed_total",

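This hunk turns proposals_failed_total from a gauge into a counter, matching the Prometheus convention that monotonically increasing _total metrics are counters. A brief sketch of the distinction; the namespace here is illustrative:

	// Counters only go up; gauges move both ways. A failed-proposal count
	// never decreases, so NewCounter is the right type for it.
	failed := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "demo", Subsystem: "server", Name: "proposals_failed_total",
		Help: "The total number of failed proposals seen.",
	})
	pending := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "demo", Subsystem: "server", Name: "proposals_pending",
		Help: "The current number of pending proposals to commit.",
	})
	failed.Inc()   // counters: Inc/Add only
	pending.Set(3) // gauges: Set/Inc/Dec freely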
132
vendor/github.com/coreos/etcd/etcdserver/raft.go
generated
vendored
@@ -98,18 +98,25 @@ type raftNode struct {
// last lead elected time
lt time.Time

// to check if msg receiver is removed from cluster
isIDRemoved func(id uint64) bool

raft.Node

// a chan to send/receive snapshot
msgSnapC chan raftpb.Message

// a chan to send out apply
applyc chan apply

// TODO: remove the etcdserver related logic from raftNode
// TODO: add a state machine interface to apply the commit entries
// and do snapshot/recover
s *EtcdServer
// a chan to send out readState
readStateC chan raft.ReadState

// utility
ticker <-chan time.Time
ticker <-chan time.Time
// contention detectors for raft heartbeat message
td *contention.TimeoutDetector
heartbeat time.Duration // for logging
raftStorage *raft.MemoryStorage
storage Storage
// transport specifies the transport to send and receive msgs to members.

@@ -118,32 +125,19 @@ type raftNode struct {
// If transport is nil, server will panic.
transport rafthttp.Transporter

td *contention.TimeoutDetector

stopped chan struct{}
done chan struct{}
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
// TODO: Ideally raftNode should get rid of the passed in server structure.
func (r *raftNode) start(s *EtcdServer) {
r.s = s
func (r *raftNode) start(rh *raftReadyHandler) {
r.applyc = make(chan apply)
r.stopped = make(chan struct{})
r.done = make(chan struct{})

heartbeat := 200 * time.Millisecond
if s.Cfg != nil {
heartbeat = time.Duration(s.Cfg.TickMs) * time.Millisecond
}
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
r.td = contention.NewTimeoutDetector(2 * heartbeat)
internalTimeout := time.Second

go func() {
var syncC <-chan time.Time

defer r.onStop()
islead := false

@@ -167,32 +161,17 @@ func (r *raftNode) start(s *EtcdServer) {
}

atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
if rd.RaftState == raft.StateLeader {
islead = true
// TODO: raft should send server a notification through chan when
// it promotes or demotes instead of modifying server directly.
syncC = r.s.SyncTicker
if r.s.lessor != nil {
r.s.lessor.Promote(r.s.Cfg.electionTimeout())
}
// TODO: remove the nil checking
// current test utility does not provide the stats
if r.s.stats != nil {
r.s.stats.BecomeLeader()
}
if r.s.compactor != nil {
r.s.compactor.Resume()
}
r.td.Reset()
} else {
islead = false
if r.s.lessor != nil {
r.s.lessor.Demote()
}
if r.s.compactor != nil {
r.s.compactor.Pause()
}
syncC = nil
islead = rd.RaftState == raft.StateLeader
rh.updateLeadership()
}

if len(rd.ReadStates) != 0 {
select {
case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
case <-time.After(internalTimeout):
plog.Warningf("timed out sending read state")
case <-r.stopped:
return
}
}

@@ -203,6 +182,8 @@ func (r *raftNode) start(s *EtcdServer) {
raftDone: raftDone,
}

updateCommittedIndex(&ap, rh)

select {
case r.applyc <- ap:
case <-r.stopped:

@@ -214,7 +195,7 @@ func (r *raftNode) start(s *EtcdServer) {
// For more details, check raft thesis 10.2.1
if islead {
// gofail: var raftBeforeLeaderSend struct{}
r.s.send(rd.Messages)
r.sendMessages(rd.Messages)
}

// gofail: var raftBeforeSave struct{}

@@ -241,12 +222,10 @@ func (r *raftNode) start(s *EtcdServer) {

if !islead {
// gofail: var raftBeforeFollowerSend struct{}
r.s.send(rd.Messages)
r.sendMessages(rd.Messages)
}
raftDone <- struct{}{}
r.Advance()
case <-syncC:
r.s.sync(r.s.Cfg.ReqTimeout())
case <-r.stopped:
return
}

@@ -254,6 +233,59 @@ func (r *raftNode) start(s *EtcdServer) {
}()
}

func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
var ci uint64
if len(ap.entries) != 0 {
ci = ap.entries[len(ap.entries)-1].Index
}
if ap.snapshot.Metadata.Index > ci {
ci = ap.snapshot.Metadata.Index
}
if ci != 0 {
rh.updateCommittedIndex(ci)
}
}

func (r *raftNode) sendMessages(ms []raftpb.Message) {
sentAppResp := false
for i := len(ms) - 1; i >= 0; i-- {
if r.isIDRemoved(ms[i].To) {
ms[i].To = 0
}

if ms[i].Type == raftpb.MsgAppResp {
if sentAppResp {
ms[i].To = 0
} else {
sentAppResp = true
}
}

if ms[i].Type == raftpb.MsgSnap {
// There are two separate data stores: the store for v2, and the KV for v3.
// The msgSnap only contains the most recent snapshot of store without KV.
// So we need to redirect the msgSnap to etcd server main loop for merging in the
// current store snapshot and KV snapshot.
select {
case r.msgSnapC <- ms[i]:
default:
// drop msgSnap if the inflight chan is full.
}
ms[i].To = 0
}
if ms[i].Type == raftpb.MsgHeartbeat {
ok, exceed := r.td.Observe(ms[i].To)
if !ok {
// TODO: limit request rate.
plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
plog.Warningf("server is likely overloaded")
}
}
}

r.transport.Send(ms)
}

func (r *raftNode) apply() chan apply {
return r.applyc
}

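The heartbeat branch above leans on a contention detector to flag an overloaded sender. A hedged, from-scratch sketch of the same idea; this is not etcd's pkg/contention API, just an illustration of what Observe is doing:

	// timeoutDetector reports whether successive observations of the same
	// peer arrive within maxDuration of each other.
	type timeoutDetector struct {
		maxDuration time.Duration
		records     map[uint64]time.Time
	}

	func newTimeoutDetector(max time.Duration) *timeoutDetector {
		return &timeoutDetector{maxDuration: max, records: make(map[uint64]time.Time)}
	}

	// observe returns ok=false and the overshoot when the gap since the last
	// observation of id exceeded maxDuration (e.g. two heartbeat intervals).
	func (d *timeoutDetector) observe(id uint64) (ok bool, exceed time.Duration) {
		now := time.Now()
		ok = true
		if last, found := d.records[id]; found {
			exceed = now.Sub(last) - d.maxDuration
			if exceed > 0 {
				ok = false
			}
		}
		d.records[id] = now
		return ok, exceed
	}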
530
vendor/github.com/coreos/etcd/etcdserver/server.go
generated
vendored
@@ -23,6 +23,7 @@ import (
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"sync"
"sync/atomic"

@@ -40,6 +41,7 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/contention"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/idutil"
"github.com/coreos/etcd/pkg/pbutil"

@@ -65,6 +67,10 @@ const (
StoreClusterPrefix = "/0"
StoreKeysPrefix = "/1"

// HealthInterval is the minimum time the cluster should be healthy
// before accepting add member requests.
HealthInterval = 5 * time.Second

purgeFileInterval = 30 * time.Second
// monitorVersionInterval should be smaller than the timeout
// on the connection. Or we will not be able to reuse the connection

@@ -77,6 +83,9 @@ const (
maxInFlightMsgSnap = 16

releaseDelayAfterSnapshot = 30 * time.Second

// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
maxPendingRevokes = 16
)

var (

@@ -158,6 +167,7 @@ type EtcdServer struct {
// inflightSnapshots holds the number of snapshots currently inflight.
inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
// consistIndex is used to hold the offset of the currently executing entry.
// It is initialized to 0 before executing any entry.
consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.

@@ -168,9 +178,23 @@ type EtcdServer struct {

snapCount uint64

w wait.Wait
stop chan struct{}
done chan struct{}
w wait.Wait

readMu sync.RWMutex
// read routine notifies etcd server that it waits for reading by sending an empty struct to
// readwaitc
readwaitc chan struct{}
// readNotifier is used to notify the read routine that it can process the request
// when there is no error
readNotifier *notifier

// stop signals the run goroutine should shutdown.
stop chan struct{}
// stopping is closed by run goroutine on shutdown.
stopping chan struct{}
// done is closed when all goroutines from start() complete.
done chan struct{}

errorc chan error
id types.ID
attributes membership.Attributes

@@ -181,7 +205,12 @@ type EtcdServer struct {

applyV2 ApplierV2

applyV3 applierV3
// applyV3 is the applier with auth and quotas
applyV3 applierV3
// applyV3Base is the core applier without auth or quotas
applyV3Base applierV3
applyWait wait.WaitTime

kv mvcc.ConsistentWatchableKV
lessor lease.Lessor
bemu sync.Mutex

@@ -204,8 +233,8 @@ type EtcdServer struct {
// to detect the cluster version immediately.
forceVersionC chan struct{}

msgSnapC chan raftpb.Message

// wgMu blocks concurrent waitgroup mutation while server stopping
wgMu sync.RWMutex
// wg is used to wait for the goroutines that depend on the server state
// to exit when stopping the server.
wg sync.WaitGroup

@@ -228,15 +257,6 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}

// Run the migrations.
dataVer, err := version.DetectDataDir(cfg.DataDir)
if err != nil {
return nil, err
}
if err = upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
return nil, err
}

haveWAL := wal.Exist(cfg.WALDir())

if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {

@@ -244,9 +264,24 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
}
ss := snap.New(cfg.SnapDir())

bepath := path.Join(cfg.SnapDir(), databaseFilename)
bepath := filepath.Join(cfg.SnapDir(), databaseFilename)
beExist := fileutil.Exist(bepath)
be := backend.NewDefaultBackend(bepath)

var be backend.Backend
beOpened := make(chan struct{})
go func() {
be = backend.NewDefaultBackend(bepath)
beOpened <- struct{}{}
}()

select {
case <-beOpened:
case <-time.After(time.Second):
plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
plog.Warningf("waiting for it to exit before starting...")
<-beOpened
}

defer func() {
if err != nil {
be.Close()

@@ -372,6 +407,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
sstats.Initialize()
lstats := stats.NewLeaderStats(id.String())

heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
srv = &EtcdServer{
readych: make(chan struct{}),
Cfg: cfg,

@@ -379,10 +415,17 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
errorc: make(chan error, 1),
store: st,
r: raftNode{
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
ticker: time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
ticker: time.Tick(heartbeat),
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
td: contention.NewTimeoutDetector(2 * heartbeat),
heartbeat: heartbeat,
raftStorage: s,
storage: NewStorage(w, ss),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
readStateC: make(chan raft.ReadState, 1),
},
id: id,
attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},

@@ -393,15 +436,16 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
}

srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

srv.be = be
minTTL := time.Duration((3*cfg.ElectionTicks)/2) * time.Duration(cfg.TickMs) * time.Millisecond
srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat

// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
if beExist {
kvindex := srv.kv.ConsistentIndex()

@@ -416,12 +460,16 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
}
srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())

srv.authStore = auth.NewAuthStore(srv.be)
srv.authStore = auth.NewAuthStore(srv.be,
func(index uint64) <-chan struct{} {
return srv.applyWait.Wait(index)
})
if h := cfg.AutoCompactionRetention; h != 0 {
srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
srv.compactor.Run()
}

srv.applyV3Base = &applierV3backend{srv}
if err = srv.restoreAlarms(); err != nil {
return nil, err
}

@@ -463,10 +511,11 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
s.start()
go s.publish(s.Cfg.ReqTimeout())
go s.purgeFile()
go monitorFileDescriptor(s.done)
go s.monitorVersions()
s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
s.goAttach(s.purgeFile)
s.goAttach(func() { monitorFileDescriptor(s.stopping) })
s.goAttach(s.monitorVersions)
s.goAttach(s.linearizableReadLoop)
}

// start prepares and starts server in a new goroutine. It is no longer safe to

@@ -478,8 +527,12 @@ func (s *EtcdServer) start() {
s.snapCount = DefaultSnapCount
}
s.w = wait.New()
s.applyWait = wait.NewTimeList()
s.done = make(chan struct{})
s.stop = make(chan struct{})
s.stopping = make(chan struct{})
s.readwaitc = make(chan struct{}, 1)
s.readNotifier = newNotifier()
if s.ClusterVersion() != nil {
plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
} else {

@@ -503,7 +556,7 @@ func (s *EtcdServer) purgeFile() {
plog.Fatalf("failed to purge wal file %v", e)
case e := <-serrc:
plog.Fatalf("failed to purge snap file %v", e)
case <-s.done:
case <-s.stopping:
return
}
}

@@ -516,6 +569,8 @@ func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler()

func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor }

func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
if s.cluster.IsIDRemoved(types.ID(m.From)) {
plog.Warningf("reject message from removed member %s", types.ID(m.From).String())

@@ -540,28 +595,91 @@ func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
type etcdProgress struct {
confState raftpb.ConfState
snapi uint64
appliedt uint64
appliedi uint64
}

// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
// and helps decouple state machine logic from Raft algorithms.
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
type raftReadyHandler struct {
updateLeadership func()
updateCommittedIndex func(uint64)
}

func (s *EtcdServer) run() {
snap, err := s.r.raftStorage.Snapshot()
if err != nil {
plog.Panicf("get snapshot from raft storage error: %v", err)
}
s.r.start(s)

var (
smu sync.RWMutex
syncC <-chan time.Time
)
setSyncC := func(ch <-chan time.Time) {
smu.Lock()
syncC = ch
smu.Unlock()
}
getSyncC := func() (ch <-chan time.Time) {
smu.RLock()
ch = syncC
smu.RUnlock()
return
}
rh := &raftReadyHandler{
updateLeadership: func() {
if !s.isLeader() {
if s.lessor != nil {
s.lessor.Demote()
}
if s.compactor != nil {
s.compactor.Pause()
}
setSyncC(nil)
} else {
setSyncC(s.SyncTicker)
if s.compactor != nil {
s.compactor.Resume()
}
}

// TODO: remove the nil checking
// current test utility does not provide the stats
if s.stats != nil {
s.stats.BecomeLeader()
}
if s.r.td != nil {
s.r.td.Reset()
}
},
updateCommittedIndex: func(ci uint64) {
cci := s.getCommittedIndex()
if ci > cci {
s.setCommittedIndex(ci)
}
},
}
s.r.start(rh)

// asynchronously accept apply packets, dispatch progress in-order
sched := schedule.NewFIFOScheduler()
ep := etcdProgress{
confState: snap.Metadata.ConfState,
snapi: snap.Metadata.Index,
appliedt: snap.Metadata.Term,
appliedi: snap.Metadata.Index,
}

defer func() {
s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
close(s.stopping)
s.wgMu.Unlock()

sched.Stop()

// wait for snapshots before closing raft so wal stays open
// wait for goroutines before closing raft so wal stays open
s.wg.Wait()

// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines

@@ -576,6 +694,9 @@ func (s *EtcdServer) run() {
if s.kv != nil {
s.kv.Close()
}
if s.authStore != nil {
s.authStore.Close()
}
if s.be != nil {
s.be.Close()
}

@@ -596,15 +717,30 @@ func (s *EtcdServer) run() {
f := func(context.Context) { s.applyAll(&ep, &ap) }
sched.Schedule(f)
case leases := <-expiredLeaseC:
go func() {
for _, l := range leases {
s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(l.ID)})
s.goAttach(func() {
// increase throughput of the expired-lease deletion process through parallelization
c := make(chan struct{}, maxPendingRevokes)
for _, lease := range leases {
select {
case c <- struct{}{}:
case <-s.stopping:
return
}
lid := lease.ID
s.goAttach(func() {
s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)})
<-c
})
}
}()
})
case err := <-s.errorc:
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
return
case <-getSyncC():
if s.store.HasTTLKeys() {
s.sync(s.Cfg.ReqTimeout())
}
case <-s.stop:
return
}

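The revoke loop above is a bounded-parallelism pattern: a buffered channel acts as a counting semaphore that caps in-flight revocations at maxPendingRevokes. A hedged, standalone sketch of the same pattern; the names here are illustrative:

	// revokeAll runs one goroutine per item but never more than limit at once.
	func revokeAll(items []int64, limit int, stop <-chan struct{}, revoke func(int64)) {
		sem := make(chan struct{}, limit)
		var wg sync.WaitGroup
		for _, id := range items {
			select {
			case sem <- struct{}{}: // acquire a slot, or...
			case <-stop: // ...bail out when shutting down
				wg.Wait()
				return
			}
			id := id // capture the loop variable for the goroutine
			wg.Add(1)
			go func() {
				defer wg.Done()
				revoke(id)
				<-sem // release the slot
			}()
		}
		wg.Wait()
	}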
@@ -622,15 +758,17 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
plog.Warningf("avoid queries with large range/delete range!")
}
proposalsApplied.Set(float64(ep.appliedi))
s.applyWait.Trigger(ep.appliedi)
// wait for the raft routine to finish the disk writes before triggering a
// snapshot, or the applied index might be greater than the last index in raft
// storage, since the raft routine might be slower than the apply routine.
<-apply.raftDone

s.triggerSnapshot(ep)
select {
// snapshot requested via send()
case m := <-s.msgSnapC:
merged := s.createMergedSnapshotMessage(m, ep.appliedi, ep.confState)
case m := <-s.r.msgSnapC:
merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
s.sendMergedSnap(merged)
default:
}

@@ -654,7 +792,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
plog.Panicf("get database snapshot file path error: %v", err)
}

fn := path.Join(s.Cfg.SnapDir(), databaseFilename)
fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename)
if err := os.Rename(snapfn, fn); err != nil {
plog.Panicf("rename snapshot file error: %v", err)
}

@@ -732,6 +870,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
}
plog.Info("finished adding peers from new cluster configuration into network...")

ep.appliedt = apply.snapshot.Metadata.Term
ep.appliedi = apply.snapshot.Metadata.Index
ep.snapi = ep.appliedi
ep.confState = apply.snapshot.Metadata.ConfState

@@ -753,7 +892,7 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
return
}
var shouldstop bool
if ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
}
}

@@ -768,9 +907,62 @@ func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
ep.snapi = ep.appliedi
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
func (s *EtcdServer) Stop() {
func (s *EtcdServer) isMultiNode() bool {
return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
}

func (s *EtcdServer) isLeader() bool {
return uint64(s.ID()) == s.Lead()
}

// transferLeadership transfers the leader to the given transferee.
// TODO: maybe expose to client?
func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error {
now := time.Now()
interval := time.Duration(s.Cfg.TickMs) * time.Millisecond

plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
s.r.TransferLeadership(ctx, lead, transferee)
for s.Lead() != transferee {
select {
case <-ctx.Done(): // time out
return ErrTimeoutLeaderTransfer
case <-time.After(interval):
}
}

// TODO: drain all requests, or drop all messages to the old leader

plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
return nil
}

// TransferLeadership transfers the leader to the chosen transferee.
func (s *EtcdServer) TransferLeadership() error {
if !s.isLeader() {
plog.Printf("skipped leadership transfer for stopping non-leader member")
return nil
}

if !s.isMultiNode() {
plog.Printf("skipped leadership transfer for single member cluster")
return nil
}

transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
if !ok {
return ErrUnhealthy
}

tm := s.Cfg.ReqTimeout()
ctx, cancel := context.WithTimeout(context.TODO(), tm)
err := s.transferLeadership(ctx, s.Lead(), uint64(transferee))
cancel()
return err
}

// HardStop stops the server without coordination with other members in the cluster.
func (s *EtcdServer) HardStop() {
select {
case s.stop <- struct{}{}:
case <-s.done:

@@ -779,6 +971,17 @@ func (s *EtcdServer) Stop() {
<-s.done
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
// When stopping the leader, Stop transfers its leadership to one of its peers
// before stopping the server.
func (s *EtcdServer) Stop() {
if err := s.TransferLeadership(); err != nil {
plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
}
s.HardStop()
}

// ReadyNotify returns a channel that will be closed when the server
// is ready to serve client requests
func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }

@@ -810,11 +1013,42 @@ func (s *EtcdServer) LeaderStats() []byte {

func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }

func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
if s.authStore == nil {
// In the context of an ordinary etcd process, s.authStore will never be nil.
// This branch is for handling cases in server_test.go
return nil
}

// Note that this permission check is done in the API layer,
// so a TOCTOU problem can potentially be caused in a schedule like this:
// update membership with user A -> revoke root role of A -> apply membership change
// in the state machine layer
// However, both membership changes and role management require the root privilege.
// So careful operation by admins can prevent the problem.
authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
if err != nil {
return err
}

return s.AuthStore().IsAdminPermitted(authInfo)
}

func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error {
if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToAddNewMember() {
// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
// In such a case adding a new member is allowed unconditionally
return ErrNotEnoughStartedMembers
if err := s.checkMembershipOperationPermission(ctx); err != nil {
return err
}

if s.Cfg.StrictReconfigCheck {
// by default StrictReconfigCheck is enabled; reject new members if unhealthy
if !s.cluster.IsReadyToAddNewMember() {
plog.Warningf("not enough started members, rejecting member add %+v", memb)
return ErrNotEnoughStartedMembers
}
if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
return ErrUnhealthy
}
}

// TODO: move Member to protobuf type

@@ -831,10 +1065,13 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToRemoveMember(id) {
// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
// In such a case removing a member is allowed unconditionally
return ErrNotEnoughStartedMembers
if err := s.checkMembershipOperationPermission(ctx); err != nil {
return err
}

// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
if err := s.mayRemoveMember(types.ID(id)); err != nil {
return err
}

cc := raftpb.ConfChange{

@@ -844,9 +1081,39 @@ func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
return s.configure(ctx, cc)
}

func (s *EtcdServer) mayRemoveMember(id types.ID) error {
if !s.Cfg.StrictReconfigCheck {
return nil
}

if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
plog.Warningf("not enough started members, rejecting remove member %s", id)
return ErrNotEnoughStartedMembers
}

// downed member is safe to remove since it's not part of the active quorum
if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
return nil
}

// protect quorum if some members are down
m := s.cluster.Members()
active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
if (active - 1) < 1+((len(m)-1)/2) {
plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
return ErrUnhealthy
}

return nil
}

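The quorum guard above can be read as arithmetic: after removal the cluster has len(m)-1 nodes, whose quorum is ((len(m)-1)/2)+1, and the removed member no longer counts among the active ones. A hedged worked example of that check (the function name is illustrative):

	func mayRemove(members, active int) bool {
		// After removal the cluster has members-1 nodes; its quorum is
		// ((members-1)/2)+1. The candidate itself stops counting as active.
		quorumAfter := 1 + ((members - 1) / 2)
		return (active - 1) >= quorumAfter
	}

	// mayRemove(5, 4) == true:  3 remaining active, and a 4-node cluster needs 3.
	// mayRemove(5, 3) == false: 2 remaining active cannot sustain a quorum of 3.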
func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error {
b, err := json.Marshal(memb)
if err != nil {
b, merr := json.Marshal(memb)
if merr != nil {
return merr
}

if err := s.checkMembershipOperationPermission(ctx); err != nil {
return err
}
cc := raftpb.ConfChange{

@@ -870,8 +1137,6 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }

func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }

func (s *EtcdServer) IsPprofEnabled() bool { return s.Cfg.EnablePprof }

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.

@@ -895,7 +1160,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error
case <-ctx.Done():
s.w.Trigger(cc.ID, nil) // GC wait
return s.parseProposeCtxErr(ctx.Err(), start)
case <-s.done:
case <-s.stopping:
return ErrStopped
}
}

@@ -913,10 +1178,10 @@ func (s *EtcdServer) sync(timeout time.Duration) {
data := pbutil.MustMarshal(&req)
// There is no promise that the node has a leader when doing a SYNC request,
// so it uses a goroutine to propose.
go func() {
s.goAttach(func() {
s.r.Propose(ctx, data)
cancel()
}()
})
}

// publish registers server information into the cluster. The information

@@ -954,52 +1219,11 @@ func (s *EtcdServer) publish(timeout time.Duration) {
}
}

// TODO: move this function into raft.go
func (s *EtcdServer) send(ms []raftpb.Message) {
sentAppResp := false
for i := len(ms) - 1; i >= 0; i-- {
if s.cluster.IsIDRemoved(types.ID(ms[i].To)) {
ms[i].To = 0
}

if ms[i].Type == raftpb.MsgAppResp {
if sentAppResp {
ms[i].To = 0
} else {
sentAppResp = true
}
}

if ms[i].Type == raftpb.MsgSnap {
// There are two separate data stores: the store for v2, and the KV for v3.
// The msgSnap only contains the most recent snapshot of store without KV.
// So we need to redirect the msgSnap to etcd server main loop for merging in the
// current store snapshot and KV snapshot.
select {
case s.msgSnapC <- ms[i]:
default:
// drop msgSnap if the inflight chan is full.
}
ms[i].To = 0
}
if ms[i].Type == raftpb.MsgHeartbeat {
ok, exceed := s.r.td.Observe(ms[i].To)
if !ok {
// TODO: limit request rate.
plog.Warningf("failed to send out heartbeat on time (exceeded the %dms timeout for %v)", s.Cfg.TickMs, exceed)
plog.Warningf("server is likely overloaded")
}
}
}

s.r.transport.Send(ms)
}

func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
atomic.AddInt64(&s.inflightSnapshots, 1)

s.r.transport.SendSnapshot(merged)
go func() {
s.goAttach(func() {
select {
case ok := <-merged.CloseNotify():
// delay releasing inflight snapshot for another 30 seconds to

@@ -1009,22 +1233,20 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
if ok {
select {
case <-time.After(releaseDelayAfterSnapshot):
case <-s.done:
case <-s.stopping:
}
}
atomic.AddInt64(&s.inflightSnapshots, -1)
case <-s.done:
case <-s.stopping:
return
}
}()
})
}

// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
var applied uint64
var shouldstop bool
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
for i := range es {
e := es[i]
switch e.Type {

@@ -1034,16 +1256,17 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint
var cc raftpb.ConfChange
pbutil.MustUnmarshal(&cc, e.Data)
removedSelf, err := s.applyConfChange(cc, confState)
shouldstop = shouldstop || removedSelf
shouldStop = shouldStop || removedSelf
s.w.Trigger(cc.ID, err)
default:
plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
}
atomic.StoreUint64(&s.r.index, e.Index)
atomic.StoreUint64(&s.r.term, e.Term)
applied = e.Index
appliedt = e.Term
appliedi = e.Index
}
return applied, shouldstop
return appliedt, appliedi, shouldStop
}

// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer

@@ -1054,6 +1277,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
s.consistIndex.setConsistentIndex(e.Index)
shouldApplyV3 = true
}
defer s.setAppliedIndex(e.Index)

// raft state machine may generate a noop entry on leader confirmation.
// skip it in advance to avoid some potential bug in the future

@@ -1062,6 +1286,11 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
case s.forceVersionC <- struct{}{}:
default:
}
// promote lessor when the local member is leader and finished
// applying all entries from the last term.
if s.isLeader() {
s.lessor.Promote(s.Cfg.electionTimeout())
}
return
}

@@ -1088,15 +1317,26 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
id = raftReq.Header.ID
}

ar := s.applyV3.Apply(&raftReq)
s.setAppliedIndex(e.Index)
var ar *applyResult
needResult := s.w.IsRegistered(id)
if needResult || !noSideEffect(&raftReq) {
if !needResult && raftReq.Txn != nil {
removeNeedlessRangeReqs(raftReq.Txn)
}
ar = s.applyV3.Apply(&raftReq)
}

if ar == nil {
return
}

if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
s.w.Trigger(id, ar)
return
}

plog.Errorf("applying raft message exceeded backend quota")
go func() {
s.goAttach(func() {
a := &pb.AlarmRequest{
MemberID: uint64(s.ID()),
Action: pb.AlarmRequest_ACTIVATE,

@@ -1105,7 +1345,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
r := pb.InternalRaftRequest{Alarm: a}
s.processInternalRaftRequest(context.TODO(), r)
s.w.Trigger(id, ar)
}()
})
}

|
||||
// applyConfChange applies a ConfChange to the server. It is only
|
||||
@@ -1156,11 +1396,15 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
|
||||
// TODO: non-blocking snapshot
|
||||
func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
|
||||
clone := s.store.Clone()
|
||||
// commit kv to write metadata (for example: consistent index) to disk.
|
||||
// KV().commit() updates the consistent index in backend.
|
||||
// All operations that update consistent index must be called sequentially
|
||||
// from applyAll function.
|
||||
// So KV().Commit() cannot run in parallel with apply. It has to be called outside
|
||||
// the go routine created below.
|
||||
s.KV().Commit()
|
||||
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
|
||||
s.goAttach(func() {
|
||||
d, err := clone.SaveNoCopy()
|
||||
// TODO: current store will never fail to do a snapshot
|
||||
// what should we do if the store might fail?
|
||||
@@ -1176,8 +1420,6 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
|
||||
}
|
||||
plog.Panicf("unexpected create snapshot error %v", err)
|
||||
}
|
||||
// commit kv to write metadata (for example: consistent index) to disk.
|
||||
s.KV().Commit()
|
||||
// SaveSnap saves the snapshot and releases the locked wal files
|
||||
// to the snapshot index.
|
||||
if err = s.r.storage.SaveSnap(snap); err != nil {
|
||||
@@ -1210,7 +1452,23 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
|
||||
plog.Panicf("unexpected compaction error %v", err)
|
||||
}
|
||||
plog.Infof("compacted raft log at %d", compacti)
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
// CutPeer drops messages to the specified peer.
|
||||
func (s *EtcdServer) CutPeer(id types.ID) {
|
||||
tr, ok := s.r.transport.(*rafthttp.Transport)
|
||||
if ok {
|
||||
tr.CutPeer(id)
|
||||
}
|
||||
}
|
||||
|
||||
// MendPeer recovers the message dropping behavior of the given peer.
|
||||
func (s *EtcdServer) MendPeer(id types.ID) {
|
||||
tr, ok := s.r.transport.(*rafthttp.Transport)
|
||||
if ok {
|
||||
tr.MendPeer(id)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
|
||||
@@ -1233,7 +1491,7 @@ func (s *EtcdServer) monitorVersions() {
|
||||
select {
|
||||
case <-s.forceVersionC:
|
||||
case <-time.After(monitorVersionInterval):
|
||||
case <-s.done:
|
||||
case <-s.stopping:
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1254,18 +1512,18 @@ func (s *EtcdServer) monitorVersions() {
|
||||
// 1. use the decided version if possible
|
||||
// 2. or use the min cluster version
|
||||
if s.cluster.Version() == nil {
|
||||
verStr := version.MinClusterVersion
|
||||
if v != nil {
|
||||
go s.updateClusterVersion(v.String())
|
||||
} else {
|
||||
go s.updateClusterVersion(version.MinClusterVersion)
|
||||
verStr = v.String()
|
||||
}
|
||||
s.goAttach(func() { s.updateClusterVersion(verStr) })
|
||||
continue
|
||||
}
|
||||
|
||||
// update cluster version only if the decided version is greater than
|
||||
// the current cluster version
|
||||
if v != nil && s.cluster.Version().LessThan(*v) {
|
||||
go s.updateClusterVersion(v.String())
|
||||
s.goAttach(func() { s.updateClusterVersion(v.String()) })
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1355,3 +1613,31 @@ func (s *EtcdServer) getAppliedIndex() uint64 {
|
||||
func (s *EtcdServer) setAppliedIndex(v uint64) {
|
||||
atomic.StoreUint64(&s.appliedIndex, v)
|
||||
}
|
||||
|
||||
func (s *EtcdServer) getCommittedIndex() uint64 {
|
||||
return atomic.LoadUint64(&s.committedIndex)
|
||||
}
|
||||
|
||||
func (s *EtcdServer) setCommittedIndex(v uint64) {
|
||||
atomic.StoreUint64(&s.committedIndex, v)
|
||||
}
|
||||
|
||||
// goAttach creates a goroutine on a given function and tracks it using
|
||||
// the etcdserver waitgroup.
|
||||
func (s *EtcdServer) goAttach(f func()) {
|
||||
s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
|
||||
defer s.wgMu.RUnlock()
|
||||
select {
|
||||
case <-s.stopping:
|
||||
plog.Warning("server has stopped (skipping goAttach)")
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// now safe to add since waitgroup wait has not started yet
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
f()
|
||||
}()
|
||||
}
|
||||
|
||||
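goAttach pairs a WaitGroup with a stopping channel and an RWMutex so that shutdown can atomically close stopping and only then wait. A hedged, standalone sketch of that pattern; the type and method names are illustrative:

	// tracker runs fns as goroutines that a single stop call can wait out.
	type tracker struct {
		mu       sync.RWMutex
		stopping chan struct{}
		wg       sync.WaitGroup
	}

	func newTracker() *tracker {
		return &tracker{stopping: make(chan struct{})}
	}

	func (t *tracker) goAttach(f func()) {
		t.mu.RLock() // blocks while stop holds the write lock to close stopping
		defer t.mu.RUnlock()
		select {
		case <-t.stopping:
			return // refuse new work once shutdown has begun
		default:
		}
		t.wg.Add(1) // safe: stop cannot start Wait while we hold the read lock
		go func() {
			defer t.wg.Done()
			f()
		}()
	}

	func (t *tracker) stop() {
		t.mu.Lock()
		close(t.stopping)
		t.mu.Unlock()
		t.wg.Wait() // all attached goroutines observe stopping and drain
	}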
8
vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
generated
vendored
@@ -16,7 +16,6 @@ package etcdserver

import (
"io"
"log"

"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/raft/raftpb"

@@ -26,12 +25,7 @@ import (
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
// as ReadCloser.
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapi uint64, confState raftpb.ConfState) snap.Message {
snapt, err := s.r.raftStorage.Term(snapi)
if err != nil {
log.Panicf("get term should never fail: %v", err)
}

func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
// get a snapshot of v2 store as []byte
clone := s.store.Clone()
d, err := clone.SaveNoCopy()

13
vendor/github.com/coreos/etcd/etcdserver/stats/server.go
generated
vendored
@@ -123,17 +123,11 @@ func (ss *ServerStats) SendAppendReq(reqSize int) {
ss.Lock()
defer ss.Unlock()

now := time.Now()

if ss.State != raft.StateLeader {
ss.State = raft.StateLeader
ss.LeaderInfo.Name = ss.ID
ss.LeaderInfo.StartTime = now
}
ss.becomeLeader()

ss.sendRateQueue.Insert(
&RequestStats{
SendingTime: now,
SendingTime: time.Now(),
Size: reqSize,
},
)

@@ -144,7 +138,10 @@ func (ss *ServerStats) SendAppendReq(reqSize int) {
func (ss *ServerStats) BecomeLeader() {
ss.Lock()
defer ss.Unlock()
ss.becomeLeader()
}

func (ss *ServerStats) becomeLeader() {
if ss.State != raft.StateLeader {
ss.State = raft.StateLeader
ss.LeaderInfo.Name = ss.ID

2
vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
generated
vendored
@@ -18,7 +18,7 @@ package stats
import "github.com/coreos/pkg/capnslog"

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "stats")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats")
)

type Stats interface {

42
vendor/github.com/coreos/etcd/etcdserver/storage.go
generated
vendored
@@ -16,16 +16,12 @@ package etcdserver

import (
"io"
"os"
"path"

pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/version"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
)

@@ -103,41 +99,3 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
cid = types.ID(metadata.ClusterID)
return
}

// upgradeDataDir converts an older version of the etcdServer data to the newest version.
// It must ensure that, after upgrading, the most recent version is present.
func upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {
switch ver {
case version.DataDir2_0:
err := makeMemberDir(baseDataDir)
if err != nil {
return err
}
fallthrough
case version.DataDir2_0_1:
fallthrough
default:
}
return nil
}

func makeMemberDir(dir string) error {
membdir := path.Join(dir, "member")
_, err := os.Stat(membdir)
switch {
case err == nil:
return nil
case !os.IsNotExist(err):
return err
}
if err := fileutil.CreateDirAll(membdir); err != nil {
return err
}
names := []string{"snap", "wal"}
for _, name := range names {
if err := os.Rename(path.Join(dir, name), path.Join(membdir, name)); err != nil {
return err
}
}
return nil
}

69
vendor/github.com/coreos/etcd/etcdserver/util.go
generated
vendored
@@ -25,13 +25,7 @@ import (

// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
var connectedNum int
for _, m := range members {
if m.ID == self || isConnectedSince(transport, since, m.ID) {
connectedNum++
}
}
return connectedNum >= (len(members)+1)/2
return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
}

// isConnectedSince checks whether the local member is connected to the

@@ -40,3 +34,64 @@ func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote ty
t := transport.ActiveSince(remote)
return !t.IsZero() && t.Before(since)
}

// isConnectedFullySince checks whether the local member is connected to all
// members in the cluster since the given time.
func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
return numConnectedSince(transport, since, self, members) == len(members)
}

// numConnectedSince counts how many members are connected to the local member
// since the given time.
func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
connectedNum := 0
for _, m := range members {
if m.ID == self || isConnectedSince(transport, since, m.ID) {
connectedNum++
}
}
return connectedNum
}

// longestConnected chooses the member with the longest active-since-time.
// It returns false if nothing is active.
func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
var longest types.ID
var oldest time.Time
for _, id := range membs {
tm := tp.ActiveSince(id)
if tm.IsZero() { // inactive
continue
}

if oldest.IsZero() { // first longest candidate
oldest = tm
longest = id
}

if tm.Before(oldest) {
oldest = tm
longest = id
}
}
if uint64(longest) == 0 {
return longest, false
}
return longest, true
}

type notifier struct {
c chan struct{}
err error
}

func newNotifier() *notifier {
return &notifier{
c: make(chan struct{}, 0),
}
}

func (nc *notifier) notify(err error) {
nc.err = err
close(nc.c)
}

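The notifier type broadcasts one result to any number of waiters by closing a channel. The linearizable read loop is its intended consumer, but a hedged standalone sketch shows the mechanics on their own:

	// Many goroutines wait on n.c; a single notify(err) wakes them all, and
	// the shared err field is safe to read after the channel is closed.
	n := newNotifier()
	for i := 0; i < 3; i++ {
		go func(i int) {
			<-n.c // blocks until notify closes the channel
			fmt.Printf("waiter %d woke with err=%v\n", i, n.err)
		}(i)
	}
	n.notify(nil) // broadcast: close(n.c) releases every waiter at once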
2
vendor/github.com/coreos/etcd/etcdserver/v2_server.go
generated
vendored
@@ -68,7 +68,7 @@ func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Res
proposalsFailed.Inc()
a.s.w.Trigger(r.ID, nil) // GC wait
return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
case <-a.s.done:
case <-a.s.stopping:
}
return Response{}, ErrStopped
}

440
vendor/github.com/coreos/etcd/etcdserver/v3_server.go
generated
vendored
@@ -15,17 +15,20 @@
package etcdserver

import (
"strconv"
"strings"
"bytes"
"encoding/binary"
"time"

"github.com/coreos/etcd/auth"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/lease/leasehttp"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/raft"

"github.com/coreos/go-semver/semver"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)

const (

@@ -35,8 +38,15 @@ const (
// specifying a large value might end up shooting yourself in the foot.
maxRequestBytes = 1.5 * 1024 * 1024

// max timeout for waiting for a v3 request to go through raft.
maxV3RequestTimeout = 5 * time.Second
// In the healthy case, there might be a small gap (tens of entries) between
// the applied index and committed index.
// However, if the committed entries are very heavy to apply, the gap might grow.
// We should stop accepting new proposals if the gap grows beyond a certain point.
maxGapBetweenApplyAndCommitIndex = 5000
)

var (
newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0"))
)

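The gap constant above encodes simple backpressure: if the apply loop falls too far behind the committed index, new proposals are rejected rather than queued without bound. A hedged sketch of that check; the names are illustrative stand-ins, not the actual guard in the request path:

	var errTooManyRequests = errors.New("etcdserver: too many requests")

	func checkCommitApplyGap(committedIndex, appliedIndex, maxGap uint64) error {
		// Reject before proposing: an ever-growing backlog of committed but
		// unapplied entries means the server cannot keep up.
		if committedIndex > appliedIndex && committedIndex-appliedIndex > maxGap {
			return errTooManyRequests
		}
		return nil
	}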
type RaftKV interface {

@@ -56,6 +66,9 @@ type Lessor interface {
// LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error
// is returned.
LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)

// LeaseTimeToLive retrieves lease information.
LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
}

type Authenticator interface {

@@ -78,22 +91,44 @@ type Authenticator interface {
}

func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
var result *applyResult
var err error
// TODO: remove this checking when we release etcd 3.2
if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
return s.legacyRange(ctx, r)
}

if r.Serializable {
var user string
user, err = s.usernameFromCtx(ctx)
if !r.Serializable {
err := s.linearizableReadNotify(ctx)
if err != nil {
return nil, err
}
result = s.applyV3.Apply(
&pb.InternalRaftRequest{
Header: &pb.RequestHeader{Username: user},
Range: r})
} else {
result, err = s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
}
var resp *pb.RangeResponse
var err error
chk := func(ai *auth.AuthInfo) error {
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
}
get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
return nil, serr
}
return resp, err
}

// TODO: remove this func when we release etcd 3.2
|
||||
func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
if r.Serializable {
|
||||
var resp *pb.RangeResponse
|
||||
var err error
|
||||
chk := func(ai *auth.AuthInfo) error {
|
||||
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
|
||||
}
|
||||
get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
|
||||
if serr := s.doSerialize(ctx, chk, get); serr != nil {
|
||||
return nil, serr
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -126,21 +161,54 @@ func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest)
}

func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	var result *applyResult
	var err error

	if isTxnSerializable(r) {
		user, err := s.usernameFromCtx(ctx)
		if err != nil {
			return nil, err
		}
		result = s.applyV3.Apply(
			&pb.InternalRaftRequest{
				Header: &pb.RequestHeader{Username: user},
				Txn:    r})
	} else {
		result, err = s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	// TODO: remove this checking when we release etcd 3.2
	if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
		return s.legacyTxn(ctx, r)
	}

	if isTxnReadonly(r) {
		if !isTxnSerializable(r) {
			err := s.linearizableReadNotify(ctx)
			if err != nil {
				return nil, err
			}
		}
		var resp *pb.TxnResponse
		var err error
		chk := func(ai *auth.AuthInfo) error {
			return checkTxnAuth(s.authStore, ai, r)
		}
		get := func() { resp, err = s.applyV3Base.Txn(r) }
		if serr := s.doSerialize(ctx, chk, get); serr != nil {
			return nil, serr
		}
		return resp, err
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.TxnResponse), nil
}

// TODO: remove this func when we release etcd 3.2
func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	if isTxnSerializable(r) {
		var resp *pb.TxnResponse
		var err error
		chk := func(ai *auth.AuthInfo) error {
			return checkTxnAuth(s.authStore, ai, r)
		}
		get := func() { resp, err = s.applyV3Base.Txn(r) }
		if serr := s.doSerialize(ctx, chk, get); serr != nil {
			return nil, serr
		}
		return resp, err
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
@@ -164,8 +232,22 @@ func isTxnSerializable(r *pb.TxnRequest) bool {
	return true
}

func isTxnReadonly(r *pb.TxnRequest) bool {
	for _, u := range r.Success {
		if r := u.GetRequestRange(); r == nil {
			return false
		}
	}
	for _, u := range r.Failure {
		if r := u.GetRequestRange(); r == nil {
			return false
		}
	}
	return true
}

func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Compaction: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
	if r.Physical && result != nil && result.physc != nil {
		<-result.physc
		// The compaction is done deleting keys; the hash is now settled
@@ -198,7 +280,7 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
		// only use positive int64 id's
		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	if err != nil {
		return nil, err
	}
@@ -209,7 +291,7 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
}

func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	if err != nil {
		return nil, err
	}
@@ -221,14 +303,13 @@ func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest)

func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
	ttl, err := s.lessor.Renew(id)
	if err == nil {
	if err == nil { // already requested to the primary lessor (leader)
		return ttl, nil
	}
	if err != lease.ErrNotPrimary {
		return -1, err
	}

	// renewals don't go through raft; forward to the leader manually
	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

@@ -239,7 +320,7 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
		return -1, lerr
	}
	for _, url := range leader.PeerURLs {
		lurl := url + "/leases"
		lurl := url + leasehttp.LeasePrefix
		ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
		if err == nil || err == lease.ErrLeaseNotFound {
			return ttl, err
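As the loop above shows, lease renewals never go through raft; a non-primary node forwards the keep-alive to the leader over HTTP. A trimmed sketch of that forwarding step, where leaderURLs and rt are stand-ins for the leader's peer URLs and the server's peer transport (RenewHTTP and LeasePrefix are the vendored helpers in this diff):

package etcdexample

import (
	"errors"
	"net/http"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasehttp"
	"golang.org/x/net/context"
)

// forwardRenew tries each leader peer URL until one gives a definitive answer.
func forwardRenew(ctx context.Context, id lease.LeaseID, leaderURLs []string, rt http.RoundTripper) (int64, error) {
	for _, u := range leaderURLs {
		ttl, err := leasehttp.RenewHTTP(ctx, id, u+leasehttp.LeasePrefix, rt)
		if err == nil || err == lease.ErrLeaseNotFound {
			return ttl, err // the primary lessor answered; done
		}
	}
	return -1, errors.New("lease renew: no leader peer responded")
}
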
@@ -249,6 +330,49 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
	return -1, ErrTimeout
}

func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	if s.Leader() == s.ID() {
		// primary; timetolive directly from leader
		le := s.lessor.Lookup(lease.LeaseID(r.ID))
		if le == nil {
			return nil, lease.ErrLeaseNotFound
		}
		// TODO: fill out ResponseHeader
		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
		if r.Keys {
			ks := le.Keys()
			kbs := make([][]byte, len(ks))
			for i := range ks {
				kbs[i] = []byte(ks[i])
			}
			resp.Keys = kbs
		}
		return resp, nil
	}

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	// forward to leader
	for cctx.Err() == nil {
		leader, err := s.waitLeader(cctx)
		if err != nil {
			return nil, err
		}
		for _, url := range leader.PeerURLs {
			lurl := url + leasehttp.LeaseInternalPrefix
			resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
			if err == nil {
				return resp.LeaseTimeToLiveResponse, nil
			}
			if err == lease.ErrLeaseNotFound {
				return nil, err
			}
		}
	}
	return nil, ErrTimeout
}

func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
	leader := s.cluster.Member(s.Leader())
	for leader == nil {
@@ -257,7 +381,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
		select {
		case <-time.After(dur):
			leader = s.cluster.Member(s.Leader())
		case <-s.done:
		case <-s.stopping:
			return nil, ErrStopped
		case <-ctx.Done():
			return nil, ErrNoLeader
@@ -270,7 +394,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
}

func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Alarm: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
	if err != nil {
		return nil, err
	}
@@ -281,7 +405,7 @@ func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmRe
}

func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthEnable: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
	if err != nil {
		return nil, err
	}
@@ -303,24 +427,47 @@ func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest)
}

func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
	st, err := s.AuthStore().GenSimpleToken()
	var result *applyResult

	err := s.linearizableReadNotify(ctx)
	if err != nil {
		return nil, err
	}

	internalReq := &pb.InternalAuthenticateRequest{
		Name:     r.Name,
		Password: r.Password,
		SimpleToken: st,
	for {
		checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
		if err != nil {
			plog.Errorf("invalid authentication request to user %s was issued", r.Name)
			return nil, err
		}

		st, err := s.AuthStore().GenSimpleToken()
		if err != nil {
			return nil, err
		}

		internalReq := &pb.InternalAuthenticateRequest{
			Name:        r.Name,
			Password:    r.Password,
			SimpleToken: st,
		}

		result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
		if err != nil {
			return nil, err
		}
		if result.err != nil {
			return nil, result.err
		}

		if checkedRevision != s.AuthStore().Revision() {
			plog.Infof("revision when password checked is obsolete, retrying")
			continue
		}

		break
	}

	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthenticateResponse), nil
}

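Authenticate above uses an optimistic check-then-propose loop: the password is checked against the auth store, the request goes through raft, and if the store's revision moved in the meantime the whole attempt is retried. A generic sketch of that pattern (authStore here is a stand-in interface, not the vendored type):

package etcdexample

// Stand-in for the auth store used above: CheckPassword reports the revision
// it validated against; Revision reports the store's current revision.
type authStore interface {
	CheckPassword(user, pass string) (rev uint64, err error)
	Revision() uint64
}

// authenticate retries whenever the auth revision moves between the password
// check and the applied proposal, closing the TOCTOU window optimistically.
func authenticate(s authStore, user, pass string, propose func() error) error {
	for {
		checked, err := s.CheckPassword(user, pass)
		if err != nil {
			return err
		}
		if err := propose(); err != nil { // raft proposal carrying the token
			return err
		}
		if checked == s.Revision() {
			return nil // auth state unchanged since the check
		}
		// a concurrent auth update invalidated the check; try again
	}
}
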
@@ -467,69 +614,52 @@ func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest
	return result.resp.(*pb.AuthRoleDeleteResponse), nil
}

func (s *EtcdServer) isValidSimpleToken(token string) bool {
	splitted := strings.Split(token, ".")
	if len(splitted) != 2 {
		return false
	}
	index, err := strconv.Atoi(splitted[1])
	if err != nil {
		return false
	}

	// CAUTION: below index synchronization is required because this node
	// might not receive and apply the log entry of Authenticate() RPC.
	authApplied := false
	for i := 0; i < 10; i++ {
		if uint64(index) <= s.getAppliedIndex() {
			authApplied = true
			break
// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
	for {
		ai, err := s.AuthStore().AuthInfoFromCtx(ctx)
		if err != nil {
			return err
		}

		time.Sleep(100 * time.Millisecond)
		if ai == nil {
			// chk expects a non-nil AuthInfo; use empty credentials
			ai = &auth.AuthInfo{}
		}
		if err = chk(ai); err != nil {
			if err == auth.ErrAuthOldRevision {
				continue
			}
			return err
		}
		// fetch response for serialized request
		get()
		// empty credentials or current auth info means no need to retry
		if ai.Revision == 0 || ai.Revision == s.authStore.Revision() {
			return nil
		}
		// to avoid a TOCTOU error, a retry of the request is required.
	}

	if !authApplied {
		plog.Errorf("timeout of waiting Authenticate() RPC")
		return false
	}

	return true
}

func (s *EtcdServer) usernameFromCtx(ctx context.Context) (string, error) {
	md, ok := metadata.FromContext(ctx)
	if !ok {
		return "", nil
func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	ai := s.getAppliedIndex()
	ci := s.getCommittedIndex()
	if ci > ai+maxGapBetweenApplyAndCommitIndex {
		return nil, ErrTooManyRequests
	}

	ts, tok := md["token"]
	if !tok {
		return "", nil
	}

	token := ts[0]
	if !s.isValidSimpleToken(token) {
		return "", ErrInvalidAuthToken
	}

	username, uok := s.AuthStore().UsernameFromToken(token)
	if !uok {
		plog.Warningf("invalid auth token: %s", token)
		return "", ErrInvalidAuthToken
	}
	return username, nil
}

func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.Header = &pb.RequestHeader{
		ID: s.reqIDGen.Next(),
	}
	username, err := s.usernameFromCtx(ctx)

	authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
	if err != nil {
		return nil, err
	}
	r.Header.Username = username
	if authInfo != nil {
		r.Header.Username = authInfo.Username
		r.Header.AuthRevision = authInfo.Revision
	}

	data, err := r.Marshal()
	if err != nil {
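doSerialize above wraps a serialized (local, non-raft) read in an auth check and re-runs it when the auth revision moves between the check and the read. A stripped-down sketch of that check/fetch/verify loop, with the revision accessors as stand-ins:

package etcdexample

import "errors"

var errAuthOldRevision = errors.New("auth: revision in header is old")

// serializedRead mirrors the doSerialize shape: check permissions, run the
// local read, then verify the caller's auth revision is still current,
// retrying on staleness.
func serializedRead(storeRev, callerRev func() uint64, chk func() error, get func()) error {
	for {
		if err := chk(); err != nil {
			if err == errAuthOldRevision {
				continue // stale permission check; re-evaluate
			}
			return err
		}
		get() // the serialized fetch
		if r := callerRev(); r == 0 || r == storeRev() {
			return nil // unauthenticated caller or unchanged auth state
		}
		// auth state changed between check and read (TOCTOU); retry
	}
}
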
@@ -546,7 +676,7 @@ func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.Intern
	}
	ch := s.w.Register(id)

	cctx, cancel := context.WithTimeout(ctx, maxV3RequestTimeout)
	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	start := time.Now()
@@ -566,5 +696,109 @@ func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.Intern
	}
}

func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	var result *applyResult
	var err error
	for {
		result, err = s.processInternalRaftRequestOnce(ctx, r)
		if err != auth.ErrAuthOldRevision {
			break
		}
	}

	return result, err
}

// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }

func (s *EtcdServer) linearizableReadLoop() {
	var rs raft.ReadState

	for {
		ctx := make([]byte, 8)
		binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next())

		select {
		case <-s.readwaitc:
		case <-s.stopping:
			return
		}

		nextnr := newNotifier()

		s.readMu.Lock()
		nr := s.readNotifier
		s.readNotifier = nextnr
		s.readMu.Unlock()

		cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
		if err := s.r.ReadIndex(cctx, ctx); err != nil {
			cancel()
			if err == raft.ErrStopped {
				return
			}
			plog.Errorf("failed to get read index from raft: %v", err)
			nr.notify(err)
			continue
		}
		cancel()

		var (
			timeout bool
			done    bool
		)
		for !timeout && !done {
			select {
			case rs = <-s.r.readStateC:
				done = bytes.Equal(rs.RequestCtx, ctx)
				if !done {
					// a previous request might have timed out; ignore its response and
					// keep waiting for the response to the current request.
					plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
				}
			case <-time.After(s.Cfg.ReqTimeout()):
				plog.Warningf("timed out waiting for read index response")
				nr.notify(ErrTimeout)
				timeout = true
			case <-s.stopping:
				return
			}
		}
		if !done {
			continue
		}

		if ai := s.getAppliedIndex(); ai < rs.Index {
			select {
			case <-s.applyWait.Wait(rs.Index):
			case <-s.stopping:
				return
			}
		}
		// unblock all l-reads requested at indices before rs.Index
		nr.notify(nil)
	}
}

func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
	s.readMu.RLock()
	nc := s.readNotifier
	s.readMu.RUnlock()

	// signal the linearizable loop for the current notifier if it hasn't been signaled already
	select {
	case s.readwaitc <- struct{}{}:
	default:
	}

	// wait for the read state notification
	select {
	case <-nc.c:
		return nc.err
	case <-ctx.Done():
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}

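The read path above batches: concurrent callers of linearizableReadNotify share one notifier, the loop issues a single raft ReadIndex for the whole batch, waits for the applier to catch up, and releases everyone at once. A single-process sketch of that batching shape, with raft replaced by an immediate confirmation:

package etcdexample

import "sync"

type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier         { return &notifier{c: make(chan struct{})} }
func (n *notifier) notify(err error) { n.err = err; close(n.c) }

// readBatcher coalesces concurrent linearizable-read waiters: all current
// callers share one notifier, and a single confirmation round releases them.
type readBatcher struct {
	mu       sync.RWMutex
	notifier *notifier
	readwait chan struct{}
}

func newReadBatcher() *readBatcher {
	b := &readBatcher{notifier: newNotifier(), readwait: make(chan struct{}, 1)}
	go b.loop()
	return b
}

func (b *readBatcher) loop() {
	for range b.readwait {
		b.mu.Lock()
		nr := b.notifier
		b.notifier = newNotifier() // later arrivals join the next round
		b.mu.Unlock()
		// a real loop would issue a raft ReadIndex here and wait until the
		// applied index catches up before releasing the batch
		nr.notify(nil)
	}
}

func (b *readBatcher) wait() error {
	b.mu.RLock()
	nc := b.notifier
	b.mu.RUnlock()
	select {
	case b.readwait <- struct{}{}: // nudge the loop if it is idle
	default:
	}
	<-nc.c
	return nc.err
}
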
3
vendor/github.com/coreos/etcd/integration/bridge.go
generated
vendored
@@ -39,7 +39,8 @@ type bridge struct {

func newBridge(addr string) (*bridge, error) {
	b := &bridge{
		inaddr: addr + ".bridge",
		// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
		inaddr:  addr + "0",
		outaddr: addr,
		conns:   make(map[*bridgeConn]struct{}),
		stopc:   make(chan struct{}, 1),

118
vendor/github.com/coreos/etcd/integration/cluster.go
generated
vendored
@@ -276,12 +276,18 @@ func (c *cluster) AddMember(t *testing.T) {
}

func (c *cluster) RemoveMember(t *testing.T, id uint64) {
	if err := c.removeMember(t, id); err != nil {
		t.Fatal(err)
	}
}

func (c *cluster) removeMember(t *testing.T, id uint64) error {
	// send remove request to the cluster
	cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
		t.Fatalf("unexpected remove error %v", err)
		return err
	}
	cancel()
	newMembers := make([]*member, 0)
@@ -302,6 +308,7 @@ func (c *cluster) RemoveMember(t *testing.T, id uint64) {
	}
	c.Members = newMembers
	c.waitMembersMatch(t, c.HTTPMembers())
	return nil
}

func (c *cluster) Terminate(t *testing.T) {
@@ -329,6 +336,7 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {

func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) }

// waitLeader waits until the given members agree on the same leader.
func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
	possibleLead := make(map[uint64]bool)
	var lead uint64
@@ -362,6 +370,28 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
	return -1
}

func (c *cluster) WaitNoLeader(t *testing.T) { c.waitNoLeader(t, c.Members) }

// waitNoLeader waits until the given members have no leader.
func (c *cluster) waitNoLeader(t *testing.T, membs []*member) {
	noLeader := false
	for !noLeader {
		noLeader = true
		for _, m := range membs {
			select {
			case <-m.s.StopNotify():
				continue
			default:
			}
			if m.s.Lead() != 0 {
				noLeader = false
				time.Sleep(10 * tickDuration)
				break
			}
		}
	}
}

func (c *cluster) waitVersion() {
	for _, m := range c.Members {
		for {
@@ -374,7 +404,7 @@ func (c *cluster) waitVersion() {
}

func (c *cluster) name(i int) string {
	return fmt.Sprint("node", i)
	return fmt.Sprint(i)
}

// isMembersEqual checks whether two member lists are equal, ignoring the ID field.
@@ -390,7 +420,8 @@ func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {

func newLocalListener(t *testing.T) net.Listener {
	c := atomic.AddInt64(&localListenCount, 1)
	addr := fmt.Sprintf("127.0.0.1:%d.%d.sock", c+basePort, os.Getpid())
	// Go 1.8+ allows only numbers in the port
	addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid())
	return NewListenerWithAddr(t, addr)
}

@@ -418,6 +449,8 @@ type member struct {
	grpcServer *grpc.Server
	grpcAddr   string
	grpcBridge *bridge

	keepDataDirTerminate bool
}

func (m *member) GRPCAddr() string { return m.grpcAddr }
@@ -480,7 +513,7 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
// listenGRPC starts a grpc server over a unix domain socket on the member
func (m *member) listenGRPC() error {
	// prefix with localhost so the cert has the right domain
	m.grpcAddr = "localhost:" + m.Name + ".sock"
	m.grpcAddr = "localhost:" + m.Name
	l, err := transport.NewUnixListener(m.grpcAddr)
	if err != nil {
		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
@@ -495,6 +528,10 @@ func (m *member) listenGRPC() error {
	return nil
}

func (m *member) electionTimeout() time.Duration {
	return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond
}

func (m *member) DropConnections() { m.grpcBridge.Reset() }

// NewClientV3 creates a new grpc client connection to the member
@@ -515,7 +552,7 @@ func NewClientV3(m *member) (*clientv3.Client, error) {
		}
		cfg.TLS = tls
	}
	return clientv3.New(cfg)
	return newClientV3(cfg)
}

// Clone returns a member with the same server configuration. The returned
@@ -653,7 +690,7 @@ func (m *member) Close() {
		m.grpcServer.Stop()
		m.grpcServer = nil
	}
	m.s.Stop()
	m.s.HardStop()
	for _, hs := range m.hss {
		hs.CloseClientConnections()
		hs.Close()
@@ -668,6 +705,15 @@ func (m *member) Stop(t *testing.T) {
	plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr)
}

// checkLeaderTransition waits for a leader transition, returning the new leader ID.
func checkLeaderTransition(t *testing.T, m *member, oldLead uint64) uint64 {
	interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
	for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
		time.Sleep(interval)
	}
	return m.s.Lead()
}

// StopNotify unblocks when a member stop completes
func (m *member) StopNotify() <-chan struct{} {
	return m.s.StopNotify()
@@ -702,12 +748,56 @@ func (m *member) Restart(t *testing.T) error {
func (m *member) Terminate(t *testing.T) {
	plog.Printf("terminating %s (%s)", m.Name, m.grpcAddr)
	m.Close()
	if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
		t.Fatal(err)
	if !m.keepDataDirTerminate {
		if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
			t.Fatal(err)
		}
	}
	plog.Printf("terminated %s (%s)", m.Name, m.grpcAddr)
}

// Metric gets the metric value for a member
func (m *member) Metric(metricName string) (string, error) {
	cfgtls := transport.TLSInfo{}
	tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
	if err != nil {
		return "", err
	}
	cli := &http.Client{Transport: tr}
	resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, rerr := ioutil.ReadAll(resp.Body)
	if rerr != nil {
		return "", rerr
	}
	lines := strings.Split(string(b), "\n")
	for _, l := range lines {
		if strings.HasPrefix(l, metricName) {
			return strings.Split(l, " ")[1], nil
		}
	}
	return "", nil
}

// InjectPartition drops connections from m to others, and vice versa.
func (m *member) InjectPartition(t *testing.T, others []*member) {
	for _, other := range others {
		m.s.CutPeer(other.s.ID())
		other.s.CutPeer(m.s.ID())
	}
}

// RecoverPartition recovers connections from m to others, and vice versa.
func (m *member) RecoverPartition(t *testing.T, others []*member) {
	for _, other := range others {
		m.s.MendPeer(other.s.ID())
		other.s.MendPeer(m.s.ID())
	}
}

func MustNewHTTPClient(t *testing.T, eps []string, tls *transport.TLSInfo) client.Client {
	cfgtls := transport.TLSInfo{}
	if tls != nil {
@@ -803,14 +893,6 @@ type grpcAPI struct {
	Watch pb.WatchClient
	// Maintenance is the maintenance API for the client's connection.
	Maintenance pb.MaintenanceClient
}

func toGRPC(c *clientv3.Client) grpcAPI {
	return grpcAPI{
		pb.NewClusterClient(c.ActiveConnection()),
		pb.NewKVClient(c.ActiveConnection()),
		pb.NewLeaseClient(c.ActiveConnection()),
		pb.NewWatchClient(c.ActiveConnection()),
		pb.NewMaintenanceClient(c.ActiveConnection()),
	}
	// Auth is the authentication API for the client's connection.
	Auth pb.AuthClient
}

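The listener and bridge changes above all serve one constraint noted in the bridge comment: Go 1.8's net package accepts only numeric ports, so the test harness now derives a unique numeric suffix from a counter and the process ID instead of a ".sock" suffix. The scheme in isolation:

package etcdexample

import (
	"fmt"
	"os"
)

// uniqueAddr builds a Go 1.8-safe address the way newLocalListener does:
// zero-padding the counter and pid to five digits each keeps distinct
// (counter, pid) pairs collision-free after concatenation.
func uniqueAddr(counter, basePort int64) string {
	return fmt.Sprintf("127.0.0.1:%05d%05d", counter+basePort, os.Getpid())
}
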
37
vendor/github.com/coreos/etcd/integration/cluster_direct.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !cluster_proxy

package integration

import (
	"github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func toGRPC(c *clientv3.Client) grpcAPI {
	return grpcAPI{
		pb.NewClusterClient(c.ActiveConnection()),
		pb.NewKVClient(c.ActiveConnection()),
		pb.NewLeaseClient(c.ActiveConnection()),
		pb.NewWatchClient(c.ActiveConnection()),
		pb.NewMaintenanceClient(c.ActiveConnection()),
		pb.NewAuthClient(c.ActiveConnection()),
	}
}

func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
	return clientv3.New(cfg)
}

89
vendor/github.com/coreos/etcd/integration/cluster_proxy.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build cluster_proxy

package integration

import (
	"sync"

	"github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/proxy/grpcproxy"
)

var (
	pmu     sync.Mutex
	proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
)

type grpcClientProxy struct {
	grpc    grpcAPI
	wdonec  <-chan struct{}
	kvdonec <-chan struct{}
}

func toGRPC(c *clientv3.Client) grpcAPI {
	pmu.Lock()
	defer pmu.Unlock()

	if v, ok := proxies[c]; ok {
		return v.grpc
	}

	wp, wpch := grpcproxy.NewWatchProxy(c)
	kvp, kvpch := grpcproxy.NewKvProxy(c)
	grpc := grpcAPI{
		pb.NewClusterClient(c.ActiveConnection()),
		grpcproxy.KvServerToKvClient(kvp),
		pb.NewLeaseClient(c.ActiveConnection()),
		grpcproxy.WatchServerToWatchClient(wp),
		pb.NewMaintenanceClient(c.ActiveConnection()),
		pb.NewAuthClient(c.ActiveConnection()),
	}
	proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch}
	return grpc
}

type proxyCloser struct {
	clientv3.Watcher
	wdonec  <-chan struct{}
	kvdonec <-chan struct{}
}

func (pc *proxyCloser) Close() error {
	// client ctx is canceled before calling close, so kv will close out
	<-pc.kvdonec
	err := pc.Watcher.Close()
	<-pc.wdonec
	return err
}

func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
	c, err := clientv3.New(cfg)
	if err != nil {
		return nil, err
	}
	rpc := toGRPC(c)
	c.KV = clientv3.NewKVFromKVClient(rpc.KV)
	pmu.Lock()
	c.Watcher = &proxyCloser{
		Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch),
		wdonec:  proxies[c].wdonec,
		kvdonec: proxies[c].kvdonec,
	}
	pmu.Unlock()
	return c, nil
}

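cluster_direct.go and cluster_proxy.go carry complementary build tags, so exactly one toGRPC/newClientV3 pair is compiled into the integration suite; running `go test -tags cluster_proxy` swaps in the proxy-backed variant. A minimal sketch of the same switch (example file, not part of the diff):

// +build !cluster_proxy

package integrationexample

// backendName identifies the direct implementation; a sibling file guarded
// by "+build cluster_proxy" would define the same function for the proxy,
// and the build tags guarantee only one definition exists per build.
func backendName() string { return "direct" }
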
186
vendor/github.com/coreos/etcd/lease/leasehttp/http.go
generated
vendored
@@ -16,21 +16,35 @@ package leasehttp

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasepb"
	"github.com/coreos/etcd/pkg/httputil"
	"golang.org/x/net/context"
)

var (
	LeasePrefix         = "/leases"
	LeaseInternalPrefix = "/leases/internal"
	applyTimeout        = time.Second
	ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
)

// NewHandler returns an http Handler for lease renewals
func NewHandler(l lease.Lessor) http.Handler {
	return &leaseHandler{l}
func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
	return &leaseHandler{l, waitch}
}

type leaseHandler struct{ l lease.Lessor }
type leaseHandler struct {
	l      lease.Lessor
	waitch func() <-chan struct{}
}

func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
@@ -44,28 +58,81 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		return
	}

	lreq := pb.LeaseKeepAliveRequest{}
	if err := lreq.Unmarshal(b); err != nil {
		http.Error(w, "error unmarshalling request", http.StatusBadRequest)
		return
	}
	var v []byte
	switch r.URL.Path {
	case LeasePrefix:
		lreq := pb.LeaseKeepAliveRequest{}
		if err := lreq.Unmarshal(b); err != nil {
			http.Error(w, "error unmarshalling request", http.StatusBadRequest)
			return
		}
		select {
		case <-h.waitch():
		case <-time.After(applyTimeout):
			http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
			return
		}
		ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
		if err != nil {
			if err == lease.ErrLeaseNotFound {
				http.Error(w, err.Error(), http.StatusNotFound)
				return
			}

	ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
	if err != nil {
		if err == lease.ErrLeaseNotFound {
			http.Error(w, err.Error(), http.StatusNotFound)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// TODO: fill out ResponseHeader
		resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
		v, err = resp.Marshal()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	case LeaseInternalPrefix:
		lreq := leasepb.LeaseInternalRequest{}
		if err := lreq.Unmarshal(b); err != nil {
			http.Error(w, "error unmarshalling request", http.StatusBadRequest)
			return
		}
		select {
		case <-h.waitch():
		case <-time.After(applyTimeout):
			http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
			return
		}
		l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
		if l == nil {
			http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
			return
		}
		// TODO: fill out ResponseHeader
		resp := &leasepb.LeaseInternalResponse{
			LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
				Header:     &pb.ResponseHeader{},
				ID:         lreq.LeaseTimeToLiveRequest.ID,
				TTL:        int64(l.Remaining().Seconds()),
				GrantedTTL: l.TTL(),
			},
		}
		if lreq.LeaseTimeToLiveRequest.Keys {
			ks := l.Keys()
			kbs := make([][]byte, len(ks))
			for i := range ks {
				kbs[i] = []byte(ks[i])
			}
			resp.LeaseTimeToLiveResponse.Keys = kbs
		}

	// TODO: fill out ResponseHeader
	resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
	v, err := resp.Marshal()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		v, err = resp.Marshal()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

	default:
		http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
		return
	}

@@ -92,15 +159,17 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT

	resp, err := cc.Do(req)
	if err != nil {
		// TODO detect if leader failed and retry?
		return -1, err
	}
	b, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	b, err := readResponse(resp)
	if err != nil {
		return -1, err
	}

	if resp.StatusCode == http.StatusRequestTimeout {
		return -1, ErrLeaseHTTPTimeout
	}

	if resp.StatusCode == http.StatusNotFound {
		return -1, lease.ErrLeaseNotFound
	}
@@ -118,3 +187,74 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT
	}
	return lresp.TTL, nil
}

// TimeToLiveHTTP retrieves lease information for the given lease ID.
func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
	// will post the lreq protobuf to the leader
	lreq, err := (&leasepb.LeaseInternalRequest{&pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: keys}}).Marshal()
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/protobuf")

	cancel := httputil.RequestCanceler(req)

	cc := &http.Client{Transport: rt}
	var b []byte
	// buffer the errc channel so that sends on errc don't block inside the goroutine
	errc := make(chan error, 2)
	go func() {
		resp, err := cc.Do(req)
		if err != nil {
			errc <- err
			return
		}
		b, err = readResponse(resp)
		if err != nil {
			errc <- err
			return
		}
		if resp.StatusCode == http.StatusRequestTimeout {
			errc <- ErrLeaseHTTPTimeout
			return
		}
		if resp.StatusCode == http.StatusNotFound {
			errc <- lease.ErrLeaseNotFound
			return
		}
		if resp.StatusCode != http.StatusOK {
			errc <- fmt.Errorf("lease: unknown error(%s)", string(b))
			return
		}
		errc <- nil
	}()
	select {
	case derr := <-errc:
		if derr != nil {
			return nil, derr
		}
	case <-ctx.Done():
		cancel()
		return nil, ctx.Err()
	}

	lresp := &leasepb.LeaseInternalResponse{}
	if err := lresp.Unmarshal(b); err != nil {
		return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
	}
	if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
		return nil, fmt.Errorf("lease: renew id mismatch")
	}
	return lresp, nil
}

func readResponse(resp *http.Response) (b []byte, err error) {
	b, err = ioutil.ReadAll(resp.Body)
	httputil.GracefulClose(resp)
	return
}

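The handler above multiplexes two protobuf-over-HTTP endpoints, /leases for keep-alives and /leases/internal for TTL queries, both gated on the receiver's applied index through waitch. A hedged sketch of mounting the handler and querying a peer (the mux wiring and waitch source are assumptions; the helpers are the vendored ones shown here):

package etcdexample

import (
	"net/http"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasehttp"
	"golang.org/x/net/context"
)

// serveLeases mounts one handler under both lease prefixes; waitch should
// unblock once this node's applied index has caught up.
func serveLeases(mux *http.ServeMux, l lease.Lessor, waitch func() <-chan struct{}) {
	h := leasehttp.NewHandler(l, waitch)
	mux.Handle(leasehttp.LeasePrefix, h)
	mux.Handle(leasehttp.LeaseInternalPrefix, h)
}

// queryTTL asks a leader peer for lease metadata over the internal endpoint.
func queryTTL(ctx context.Context, id lease.LeaseID, peerURL string, rt http.RoundTripper) (int64, error) {
	resp, err := leasehttp.TimeToLiveHTTP(ctx, id, true, peerURL+leasehttp.LeaseInternalPrefix, rt)
	if err != nil {
		return 0, err
	}
	return resp.LeaseTimeToLiveResponse.TTL, nil
}
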
375
vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go
generated
vendored
@@ -10,6 +10,8 @@

	It has these top-level messages:
		Lease
		LeaseInternalRequest
		LeaseInternalResponse
*/
package leasepb

@@ -20,6 +22,8 @@ import (

	math "math"

	etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	io "io"
)

@@ -30,11 +34,13 @@ var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Lease struct {
	ID  int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
	TTL int64 `protobuf:"varint,2,opt,name=TTL,json=tTL,proto3" json:"TTL,omitempty"`
	ID  int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
	TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
}

func (m *Lease) Reset() { *m = Lease{} }
@@ -42,62 +48,138 @@ func (m *Lease) String() string { return proto.CompactTextString(m) }
func (*Lease) ProtoMessage() {}
func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} }

type LeaseInternalRequest struct {
	LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"`
}

func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} }
func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
func (*LeaseInternalRequest) ProtoMessage() {}
func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} }

type LeaseInternalResponse struct {
	LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"`
}

func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} }
func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
func (*LeaseInternalResponse) ProtoMessage() {}
func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} }

func init() {
	proto.RegisterType((*Lease)(nil), "leasepb.Lease")
	proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
	proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
}
func (m *Lease) Marshal() (data []byte, err error) {
func (m *Lease) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
	return dAtA[:n], nil
}

func (m *Lease) MarshalTo(data []byte) (int, error) {
func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.ID != 0 {
		data[i] = 0x8
		dAtA[i] = 0x8
		i++
		i = encodeVarintLease(data, i, uint64(m.ID))
		i = encodeVarintLease(dAtA, i, uint64(m.ID))
	}
	if m.TTL != 0 {
		data[i] = 0x10
		dAtA[i] = 0x10
		i++
		i = encodeVarintLease(data, i, uint64(m.TTL))
		i = encodeVarintLease(dAtA, i, uint64(m.TTL))
	}
	return i, nil
}

func encodeFixed64Lease(data []byte, offset int, v uint64) int {
	data[offset] = uint8(v)
	data[offset+1] = uint8(v >> 8)
	data[offset+2] = uint8(v >> 16)
	data[offset+3] = uint8(v >> 24)
	data[offset+4] = uint8(v >> 32)
	data[offset+5] = uint8(v >> 40)
	data[offset+6] = uint8(v >> 48)
	data[offset+7] = uint8(v >> 56)
func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.LeaseTimeToLiveRequest != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveRequest.Size()))
		n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n1
	}
	return i, nil
}

func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.LeaseTimeToLiveResponse != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size()))
		n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n2
	}
	return i, nil
}

func encodeFixed64Lease(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Lease(data []byte, offset int, v uint32) int {
	data[offset] = uint8(v)
	data[offset+1] = uint8(v >> 8)
	data[offset+2] = uint8(v >> 16)
	data[offset+3] = uint8(v >> 24)
func encodeFixed32Lease(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintLease(data []byte, offset int, v uint64) int {
func encodeVarintLease(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	data[offset] = uint8(v)
	dAtA[offset] = uint8(v)
	return offset + 1
}
func (m *Lease) Size() (n int) {
@@ -112,6 +194,26 @@ func (m *Lease) Size() (n int) {
	return n
}

func (m *LeaseInternalRequest) Size() (n int) {
	var l int
	_ = l
	if m.LeaseTimeToLiveRequest != nil {
		l = m.LeaseTimeToLiveRequest.Size()
		n += 1 + l + sovLease(uint64(l))
	}
	return n
}

func (m *LeaseInternalResponse) Size() (n int) {
	var l int
	_ = l
	if m.LeaseTimeToLiveResponse != nil {
		l = m.LeaseTimeToLiveResponse.Size()
		n += 1 + l + sovLease(uint64(l))
	}
	return n
}

func sovLease(x uint64) (n int) {
	for {
		n++
@@ -125,8 +227,8 @@ func sovLease(x uint64) (n int) {
func sozLease(x uint64) (n int) {
	return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Lease) Unmarshal(data []byte) error {
	l := len(data)
func (m *Lease) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
@@ -138,7 +240,7 @@ func (m *Lease) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -166,7 +268,7 @@ func (m *Lease) Unmarshal(data []byte) error {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				b := dAtA[iNdEx]
				iNdEx++
				m.ID |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
@@ -185,7 +287,7 @@ func (m *Lease) Unmarshal(data []byte) error {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				b := dAtA[iNdEx]
				iNdEx++
				m.TTL |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
@@ -194,7 +296,7 @@ func (m *Lease) Unmarshal(data []byte) error {
			}
		default:
			iNdEx = preIndex
			skippy, err := skipLease(data[iNdEx:])
			skippy, err := skipLease(dAtA[iNdEx:])
			if err != nil {
				return err
			}
@@ -213,8 +315,174 @@ func (m *Lease) Unmarshal(data []byte) error {
	}
	return nil
}
func skipLease(data []byte) (n int, err error) {
	l := len(data)
func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowLease
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowLease
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthLease
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.LeaseTimeToLiveRequest == nil {
				m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{}
			}
			if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipLease(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthLease
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowLease
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowLease
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthLease
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.LeaseTimeToLiveResponse == nil {
				m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{}
			}
			if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipLease(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthLease
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipLease(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
@@ -225,7 +493,7 @@ func skipLease(data []byte) (n int, err error) {
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -243,7 +511,7 @@ func skipLease(data []byte) (n int, err error) {
				return 0, io.ErrUnexpectedEOF
			}
			iNdEx++
			if data[iNdEx-1] < 0x80 {
			if dAtA[iNdEx-1] < 0x80 {
				break
			}
		}
@@ -260,7 +528,7 @@ func skipLease(data []byte) (n int, err error) {
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			b := dAtA[iNdEx]
			iNdEx++
			length |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -283,7 +551,7 @@ func skipLease(data []byte) (n int, err error) {
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			b := dAtA[iNdEx]
			iNdEx++
			innerWire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -294,7 +562,7 @@ func skipLease(data []byte) (n int, err error) {
			if innerWireType == 4 {
				break
			}
			next, err := skipLease(data[start:])
			next, err := skipLease(dAtA[start:])
			if err != nil {
				return 0, err
			}
@@ -318,14 +586,23 @@ var (
	ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) }

var fileDescriptorLease = []byte{
	// 126 bytes of a gzipped FileDescriptorProto
	// 233 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
	0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
	0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x49, 0x93, 0x8b, 0xd5, 0x07, 0xa4,
	0x40, 0x88, 0x8f, 0x8b, 0xc9, 0xd3, 0x45, 0x82, 0x51, 0x81, 0x51, 0x83, 0x39, 0x88, 0x29, 0xd3,
	0x45, 0x48, 0x80, 0x8b, 0x39, 0x24, 0xc4, 0x47, 0x82, 0x09, 0x2c, 0xc0, 0x5c, 0x12, 0xe2, 0xe3,
	0x24, 0x71, 0xe2, 0xa1, 0x1c, 0xc3, 0x85, 0x87, 0x72, 0x0c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
	0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0xb3, 0x8c,
	0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xa0, 0x42, 0x1a, 0x79, 0x00, 0x00, 0x00,
	0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45,
	0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92,
	0x21, 0xea, 0x94, 0x34, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48,
	0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8,
	0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0xa5, 0x12, 0x2e, 0x11, 0xb0, 0x52, 0xcf, 0xbc, 0x92, 0xd4,
	0xa2, 0xbc, 0xc4, 0x9c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0xa1, 0x18, 0x2e, 0x31, 0xb0,
	0x78, 0x48, 0x66, 0x6e, 0x6a, 0x48, 0xbe, 0x4f, 0x66, 0x59, 0x2a, 0x54, 0x06, 0x6c, 0x1a, 0xb7,
	0x91, 0x8a, 0x1e, 0xb2, 0xdd, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, 0xa9, 0x82, 0x4b, 0x14,
	0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, 0x0c, 0x2d, 0x10, 0x29,
	0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93, 0xc4, 0x89, 0x87, 0x72,
	0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
	0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff,
	0xff, 0x9f, 0xf2, 0x42, 0xe0, 0x91, 0x01, 0x00, 0x00,
}

9
vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
generated
vendored
@@ -2,6 +2,7 @@ syntax = "proto3";
package leasepb;

import "gogoproto/gogo.proto";
import "etcd/etcdserver/etcdserverpb/rpc.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
@@ -13,3 +14,11 @@ message Lease {
int64 ID = 1;
int64 TTL = 2;
}

message LeaseInternalRequest {
etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
}

message LeaseInternalResponse {
etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
}
189
vendor/github.com/coreos/etcd/lease/lessor.go
generated
vendored
@@ -18,11 +18,14 @@ import (
"encoding/binary"
"errors"
"math"
"sort"
"sync"
"sync/atomic"
"time"

"github.com/coreos/etcd/lease/leasepb"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/monotime"
)

const (
@@ -32,9 +35,8 @@ const (

var (
leaseBucketName = []byte("lease")
// do not use maxInt64 since it can overflow time which will add
// the offset of unix time (1970yr to seconds).
forever = time.Unix(math.MaxInt64>>1, 0)

forever = monotime.Time(math.MaxInt64)

ErrNotPrimary = errors.New("not a primary lessor")
ErrLeaseNotFound = errors.New("lease not found")
@@ -75,6 +77,10 @@ type Lessor interface {
// If the lease does not exist, an error will be returned.
Attach(id LeaseID, items []LeaseItem) error

// GetLease returns LeaseID for given item.
// If no lease found, NoLease value will be returned.
GetLease(item LeaseItem) LeaseID

// Detach detaches given leaseItem from the lease with given LeaseID.
// If the lease does not exist, an error will be returned.
Detach(id LeaseID, items []LeaseItem) error
@@ -110,20 +116,9 @@ type Lessor interface {
type lessor struct {
mu sync.Mutex

// primary indicates if this lessor is the primary lessor. The primary
// lessor manages lease expiration and renew.
//
// in etcd, raft leader is the primary. Thus there might be two primary
// leaders at the same time (raft allows concurrent leader but with different term)
// for at most a leader election timeout.
// The old primary leader cannot affect the correctness since its proposal has a
// smaller term and will not be committed.
//
// TODO: raft follower do not forward lease management proposals. There might be a
// very small window (within second normally which depends on go scheduling) that
// a raft follow is the primary between the raft leader demotion and lessor demotion.
// Usually this should not be a problem. Lease should not be that sensitive to timing.
primary bool
// demotec is set when the lessor is the primary.
// demotec will be closed if the lessor is demoted.
demotec chan struct{}

// TODO: probably this should be a heap with a secondary
// id index.
@@ -133,6 +128,8 @@ type lessor struct {
// findExpiredLeases and Renew should be the most frequent operations.
leaseMap map[LeaseID]*Lease

itemMap map[LeaseItem]LeaseID

// When a lease expires, the lessor will delete the
// leased range (or key) by the RangeDeleter.
rd RangeDeleter
@@ -159,6 +156,7 @@ func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor {
func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
l := &lessor{
leaseMap: make(map[LeaseID]*Lease),
itemMap: make(map[LeaseItem]LeaseID),
b: b,
minLeaseTTL: minLeaseTTL,
// expiredC is a small buffered chan to avoid unnecessary blocking.
@@ -173,6 +171,23 @@ func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
return l
}

// isPrimary indicates if this lessor is the primary lessor. The primary
// lessor manages lease expiration and renew.
//
// in etcd, raft leader is the primary. Thus there might be two primary
// leaders at the same time (raft allows concurrent leader but with different term)
// for at most a leader election timeout.
// The old primary leader cannot affect the correctness since its proposal has a
// smaller term and will not be committed.
//
// TODO: raft follower do not forward lease management proposals. There might be a
// very small window (within second normally which depends on go scheduling) that
// a raft follow is the primary between the raft leader demotion and lessor demotion.
// Usually this should not be a problem. Lease should not be that sensitive to timing.
func (le *lessor) isPrimary() bool {
return le.demotec != nil
}

func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
le.mu.Lock()
defer le.mu.Unlock()
@@ -187,7 +202,12 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {

// TODO: when lessor is under high load, it should give out lease
// with longer TTL to reduce renew load.
l := &Lease{ID: id, TTL: ttl, itemSet: make(map[LeaseItem]struct{})}
l := &Lease{
ID: id,
ttl: ttl,
itemSet: make(map[LeaseItem]struct{}),
revokec: make(chan struct{}),
}

le.mu.Lock()
defer le.mu.Unlock()
@@ -196,11 +216,11 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
return nil, ErrLeaseExists
}

if l.TTL < le.minLeaseTTL {
l.TTL = le.minLeaseTTL
if l.ttl < le.minLeaseTTL {
l.ttl = le.minLeaseTTL
}

if le.primary {
if le.isPrimary() {
l.refresh(0)
} else {
l.forever()
@@ -220,6 +240,7 @@ func (le *lessor) Revoke(id LeaseID) error {
le.mu.Unlock()
return ErrLeaseNotFound
}
defer close(l.revokec)
// unlock before doing external work
le.mu.Unlock()

@@ -228,8 +249,13 @@ func (le *lessor) Revoke(id LeaseID) error {
}

tid := le.rd.TxnBegin()
for item := range l.itemSet {
_, _, err := le.rd.TxnDeleteRange(tid, []byte(item.Key), nil)

// sort keys so deletes are in same order among all members,
// otherwise the backened hashes will be different
keys := l.Keys()
sort.StringSlice(keys).Sort()
for _, key := range keys {
_, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil)
if err != nil {
panic(err)
}
@@ -255,36 +281,55 @@ func (le *lessor) Revoke(id LeaseID) error {
// has expired, an error will be returned.
func (le *lessor) Renew(id LeaseID) (int64, error) {
le.mu.Lock()
defer le.mu.Unlock()

if !le.primary {
unlock := func() { le.mu.Unlock() }
defer func() { unlock() }()

if !le.isPrimary() {
// forward renew request to primary instead of returning error.
return -1, ErrNotPrimary
}

demotec := le.demotec

l := le.leaseMap[id]
if l == nil {
return -1, ErrLeaseNotFound
}

if l.expired() {
le.mu.Unlock()
unlock = func() {}
select {
// A expired lease might be pending for revoking or going through
// quorum to be revoked. To be accurate, renew request must wait for the
// deletion to complete.
case <-l.revokec:
return -1, ErrLeaseNotFound
// The expired lease might fail to be revoked if the primary changes.
// The caller will retry on ErrNotPrimary.
case <-demotec:
return -1, ErrNotPrimary
case <-le.stopC:
return -1, ErrNotPrimary
}
}

l.refresh(0)
return l.TTL, nil
return l.ttl, nil
}

func (le *lessor) Lookup(id LeaseID) *Lease {
le.mu.Lock()
defer le.mu.Unlock()
if l, ok := le.leaseMap[id]; ok {
return l
}
return nil
return le.leaseMap[id]
}

func (le *lessor) Promote(extend time.Duration) {
le.mu.Lock()
defer le.mu.Unlock()

le.primary = true
le.demotec = make(chan struct{})

// refresh the expiries of all leases.
for _, l := range le.leaseMap {
@@ -301,7 +346,10 @@ func (le *lessor) Demote() {
l.forever()
}

le.primary = false
if le.demotec != nil {
close(le.demotec)
le.demotec = nil
}
}

// Attach attaches items to the lease with given ID. When the lease
@@ -316,12 +364,22 @@ func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
return ErrLeaseNotFound
}

l.mu.Lock()
for _, it := range items {
l.itemSet[it] = struct{}{}
le.itemMap[it] = id
}
l.mu.Unlock()
return nil
}

func (le *lessor) GetLease(item LeaseItem) LeaseID {
le.mu.Lock()
id := le.itemMap[item]
le.mu.Unlock()
return id
}

// Detach detaches items from the lease with given ID.
// If the given lease does not exist, an error will be returned.
func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
@@ -333,9 +391,12 @@ func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
return ErrLeaseNotFound
}

l.mu.Lock()
for _, it := range items {
delete(l.itemSet, it)
delete(le.itemMap, it)
}
l.mu.Unlock()
return nil
}

@@ -346,7 +407,7 @@ func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
le.b = b
le.rd = rd
le.leaseMap = make(map[LeaseID]*Lease)

le.itemMap = make(map[LeaseItem]LeaseID)
le.initAndRecover()
}

@@ -366,7 +427,7 @@ func (le *lessor) runLoop() {
var ls []*Lease

le.mu.Lock()
if le.primary {
if le.isPrimary() {
ls = le.findExpiredLeases()
}
le.mu.Unlock()
@@ -395,12 +456,11 @@ func (le *lessor) runLoop() {
// leases that needed to be revoked.
func (le *lessor) findExpiredLeases() []*Lease {
leases := make([]*Lease, 0, 16)
now := time.Now()

for _, l := range le.leaseMap {
// TODO: probably should change to <= 100-500 millisecond to
// make up committing latency.
if l.expiry.Sub(now) <= 0 {
if l.expired() {
leases = append(leases, l)
}
}
@@ -408,15 +468,6 @@ func (le *lessor) findExpiredLeases() []*Lease {
return leases
}

// get gets the lease with given id.
// get is a helper function for testing, at least for now.
func (le *lessor) get(id LeaseID) *Lease {
le.mu.Lock()
defer le.mu.Unlock()

return le.leaseMap[id]
}

func (le *lessor) initAndRecover() {
tx := le.b.BatchTx()
tx.Lock()
@@ -437,11 +488,12 @@ func (le *lessor) initAndRecover() {
}
le.leaseMap[ID] = &Lease{
ID: ID,
TTL: lpb.TTL,
ttl: lpb.TTL,
// itemSet will be filled in when recover key-value pairs
// set expiry to forever, refresh when promoted
itemSet: make(map[LeaseItem]struct{}),
expiry: forever,
revokec: make(chan struct{}),
}
}
tx.Unlock()
@@ -451,17 +503,24 @@ func (le *lessor) initAndRecover() {

type Lease struct {
ID LeaseID
TTL int64 // time to live in seconds
ttl int64 // time to live in seconds
// expiry is time when lease should expire; must be 64-bit aligned.
expiry monotime.Time

// mu protects concurrent accesses to itemSet
mu sync.RWMutex
itemSet map[LeaseItem]struct{}
// expiry time in unixnano
expiry time.Time
revokec chan struct{}
}

func (l Lease) persistTo(b backend.Backend) {
func (l *Lease) expired() bool {
return l.Remaining() <= 0
}

func (l *Lease) persistTo(b backend.Backend) {
key := int64ToBytes(int64(l.ID))

lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.TTL)}
lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)}
val, err := lpb.Marshal()
if err != nil {
panic("failed to marshal lease proto item")
@@ -472,13 +531,36 @@ func (l Lease) persistTo(b backend.Backend) {
b.BatchTx().Unlock()
}

// TTL returns the TTL of the Lease.
func (l *Lease) TTL() int64 {
return l.ttl
}

// refresh refreshes the expiry of the lease.
func (l *Lease) refresh(extend time.Duration) {
l.expiry = time.Now().Add(extend + time.Second*time.Duration(l.TTL))
t := monotime.Now().Add(extend + time.Duration(l.ttl)*time.Second)
atomic.StoreUint64((*uint64)(&l.expiry), uint64(t))
}

// forever sets the expiry of lease to be forever.
func (l *Lease) forever() { l.expiry = forever }
func (l *Lease) forever() { atomic.StoreUint64((*uint64)(&l.expiry), uint64(forever)) }

// Keys returns all the keys attached to the lease.
func (l *Lease) Keys() []string {
l.mu.RLock()
keys := make([]string, 0, len(l.itemSet))
for k := range l.itemSet {
keys = append(keys, k.Key)
}
l.mu.RUnlock()
return keys
}

// Remaining returns the remaining time of the lease.
func (l *Lease) Remaining() time.Duration {
t := monotime.Time(atomic.LoadUint64((*uint64)(&l.expiry)))
return time.Duration(t - monotime.Now())
}

type LeaseItem struct {
Key string
@@ -502,6 +584,7 @@ func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }

func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }

func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }

func (fl *FakeLessor) Promote(extend time.Duration) {}
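Editor's note: the lessor change above moves lease expiry onto a monotonic clock stored as a uint64 and published with atomic loads/stores, so expiry checks need no lock. Below is a minimal standalone sketch of that pattern, not the vendored code itself; the mono type and now() helper are illustrative stand-ins for monotime.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// mono stands in for monotime.Time: nanoseconds on some monotonic scale.
type mono uint64

func now() mono { return mono(time.Now().UnixNano()) } // assumption: adequate for a demo

type lease struct {
	expiry uint64 // first field, so 64-bit aligned; always accessed atomically
}

// refresh publishes a new expiry without taking any lock.
func (l *lease) refresh(ttl time.Duration) {
	atomic.StoreUint64(&l.expiry, uint64(now())+uint64(ttl))
}

// expired may run concurrently with refresh; it only does an atomic load.
func (l *lease) expired() bool {
	return mono(atomic.LoadUint64(&l.expiry)) <= now()
}

func main() {
	l := &lease{}
	l.refresh(100 * time.Millisecond)
	fmt.Println("expired immediately?", l.expired()) // false
	time.Sleep(150 * time.Millisecond)
	fmt.Println("expired after TTL?", l.expired()) // true
}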
9
vendor/github.com/coreos/etcd/mvcc/backend/backend.go
generated
vendored
@@ -20,7 +20,7 @@ import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sync"
"sync/atomic"
"time"
@@ -40,7 +40,7 @@ var (
// This only works for linux.
InitialMmapSize = int64(10 * 1024 * 1024 * 1024)

plog = capnslog.NewPackageLogger("github.com/coreos/etcd/mvcc", "backend")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")
)

const (
@@ -303,6 +303,7 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error {
}

tmpb, berr := tmptx.CreateBucketIfNotExists(next)
tmpb.FillPercent = 0.9 // for seq write in for each
if berr != nil {
return berr
}
@@ -319,6 +320,8 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error {
return err
}
tmpb = tmptx.Bucket(next)
tmpb.FillPercent = 0.9 // for seq write in for each

count = 0
}
return tmpb.Put(k, v)
@@ -334,7 +337,7 @@ func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, strin
if err != nil {
plog.Fatal(err)
}
tmpPath := path.Join(dir, "database")
tmpPath := filepath.Join(dir, "database")
return newBackend(tmpPath, batchInterval, batchLimit), tmpPath
}
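Editor's note: the defrag hunk sets bolt's Bucket.FillPercent to 0.9 because defragmentation writes keys in sorted order. A hedged sketch of the same knob in isolation (the database path is illustrative); with strictly sequential inserts a high fill percent packs pages tighter and avoids page splits, whereas the 0.5 default favors random inserts.

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/fillpercent-demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateateBucketIfNotExists([]byte("kv")) // see note below
		if err != nil {
			return err
		}
		// Keys are inserted in ascending order, so pack pages densely.
		b.FillPercent = 0.9
		for i := byte(0); i < 100; i++ {
			if err := b.Put([]byte{'k', i}, []byte{i}); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}

(The bucket-creation call is bolt's CreateBucketIfNotExists; the doubled letters above are a typo to avoid: use tx.CreateBucketIfNotExists exactly as in the vendored defragdb.)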
17
vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
generated
vendored
@@ -162,11 +162,26 @@ func (t *batchTx) commit(stop bool) {
if t.pending == 0 && !stop {
t.backend.mu.RLock()
defer t.backend.mu.RUnlock()
atomic.StoreInt64(&t.backend.size, t.tx.Size())

// batchTx.commit(true) calls *bolt.Tx.Commit, which
// initializes *bolt.Tx.db and *bolt.Tx.meta as nil,
// and subsequent *bolt.Tx.Size() call panics.
//
// This nil pointer reference panic happens when:
// 1. batchTx.commit(false) from newBatchTx
// 2. batchTx.commit(true) from stopping backend
// 3. batchTx.commit(false) from inflight mvcc Hash call
//
// Check if db is nil to prevent this panic
if t.tx.DB() != nil {
atomic.StoreInt64(&t.backend.size, t.tx.Size())
}
return
}
start := time.Now()
// gofail: var beforeCommit struct{}
err = t.tx.Commit()
// gofail: var afterCommit struct{}
commitDurations.Observe(time.Since(start).Seconds())
atomic.AddInt64(&t.backend.commits, 1)
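Editor's note: the guard above relies on bolt nil-ing out a transaction's DB pointer once it commits, which makes Tx.DB() a cheap liveness probe before Tx.Size(). A compilable fragment of just that guard (package name is illustrative):

package sketch

import "github.com/boltdb/bolt"

// safeSize returns the size seen by tx, or 0 if tx has already been
// committed or rolled back; calling tx.Size() in that state would
// dereference the nil internal db pointer and panic.
func safeSize(tx *bolt.Tx) int64 {
	if tx.DB() == nil {
		return 0
	}
	return tx.Size()
}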
23
vendor/github.com/coreos/etcd/mvcc/index.go
generated
vendored
@@ -25,11 +25,11 @@ type index interface {
Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
Range(key, end []byte, atRev int64) ([][]byte, []revision)
Put(key []byte, rev revision)
Restore(key []byte, created, modified revision, ver int64)
Tombstone(key []byte, rev revision) error
RangeSince(key, end []byte, rev int64) []revision
Compact(rev int64) map[revision]struct{}
Equal(b index) bool
Insert(ki *keyIndex)
}

type treeIndex struct {
@@ -58,21 +58,6 @@ func (ti *treeIndex) Put(key []byte, rev revision) {
okeyi.put(rev.main, rev.sub)
}

func (ti *treeIndex) Restore(key []byte, created, modified revision, ver int64) {
keyi := &keyIndex{key: key}

ti.Lock()
defer ti.Unlock()
item := ti.tree.Get(keyi)
if item == nil {
keyi.restore(created, modified, ver)
ti.tree.ReplaceOrInsert(keyi)
return
}
okeyi := item.(*keyIndex)
okeyi.put(modified.main, modified.sub)
}

func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
keyi := &keyIndex{key: key}

@@ -215,3 +200,9 @@ func (a *treeIndex) Equal(bi index) bool {

return equal
}

func (ti *treeIndex) Insert(ki *keyIndex) {
ti.Lock()
defer ti.Unlock()
ti.tree.ReplaceOrInsert(ki)
}
89
vendor/github.com/coreos/etcd/mvcc/kvstore.go
generated
vendored
@@ -74,8 +74,9 @@ type store struct {
// the main revision of the last compaction
compactMainRev int64

tx backend.BatchTx
txnID int64 // tracks the current txnID to verify txn operations
tx backend.BatchTx
txnID int64 // tracks the current txnID to verify txn operations
txnModify bool

// bytesBuf8 is a byte slice of length 8
// to avoid a repetitive allocation in saveIndex.
@@ -180,7 +181,6 @@ func (s *store) TxnBegin() int64 {
s.currentRev.sub = 0
s.tx = s.b.BatchTx()
s.tx.Lock()
s.saveIndex()

s.txnID = rand.Int63()
return s.txnID
@@ -203,6 +203,14 @@ func (s *store) txnEnd(txnID int64) error {
return ErrTxnIDMismatch
}

// only update index if the txn modifies the mvcc state.
// read only txn might execute with one write txn concurrently,
// it should not write its index to mvcc.
if s.txnModify {
s.saveIndex()
}
s.txnModify = false

s.tx.Unlock()
if s.currentRev.sub != 0 {
s.currentRev.main += 1
@@ -314,20 +322,23 @@ func (s *store) Compact(rev int64) (<-chan struct{}, error) {
return ch, nil
}

func (s *store) Hash() (uint32, int64, error) {
s.b.ForceCommit()
// DefaultIgnores is a map of keys to ignore in hash checking.
var DefaultIgnores map[backend.IgnoreKey]struct{}

func init() {
DefaultIgnores = map[backend.IgnoreKey]struct{}{
// consistent index might be changed due to v2 internal sync, which
// is not controllable by the user.
{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
}
}

func (s *store) Hash() (uint32, int64, error) {
s.mu.Lock()
defer s.mu.Unlock()
s.b.ForceCommit()

// ignore hash consistent index field for now.
// consistent index might be changed due to v2 internal sync, which
// is not controllable by the user.
ignores := make(map[backend.IgnoreKey]struct{})
bk := backend.IgnoreKey{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}
ignores[bk] = struct{}{}

h, err := s.b.Hash(ignores)
h, err := s.b.Hash(DefaultIgnores)
rev := s.currentRev.main
return h, rev, err
}
@@ -369,6 +380,11 @@ func (s *store) restore() error {

keyToLease := make(map[string]lease.LeaseID)

// use an unordered map to hold the temp index data to speed up
// the initial key index recovery.
// we will convert this unordered map into the tree index later.
unordered := make(map[string]*keyIndex, 100000)

// restore index
tx := s.b.BatchTx()
tx.Lock()
@@ -391,11 +407,20 @@ func (s *store) restore() error {
// restore index
switch {
case isTombstone(key):
s.kvindex.Tombstone(kv.Key, rev)
if ki, ok := unordered[string(kv.Key)]; ok {
ki.tombstone(rev.main, rev.sub)
}
delete(keyToLease, string(kv.Key))

default:
s.kvindex.Restore(kv.Key, revision{kv.CreateRevision, 0}, rev, kv.Version)
ki, ok := unordered[string(kv.Key)]
if ok {
ki.put(rev.main, rev.sub)
} else {
ki = &keyIndex{key: kv.Key}
ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version)
unordered[string(kv.Key)] = ki
}

if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease {
keyToLease[string(kv.Key)] = lid
@@ -408,6 +433,11 @@ func (s *store) restore() error {
s.currentRev = rev
}

// restore the tree index from the unordered index.
for _, v := range unordered {
s.kvindex.Insert(v)
}

// keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
// the correct revision should be set to compaction revision in the case, not the largest revision
// we have seen.
@@ -509,23 +539,18 @@ func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool
}

func (s *store) put(key, value []byte, leaseID lease.LeaseID) {
s.txnModify = true

rev := s.currentRev.main + 1
c := rev
oldLease := lease.NoLease

// if the key exists before, use its previous created and
// get its previous leaseID
grev, created, ver, err := s.kvindex.Get(key, rev)
_, created, ver, err := s.kvindex.Get(key, rev)
if err == nil {
c = created.main
ibytes := newRevBytes()
revToBytes(grev, ibytes)
_, vs := s.tx.UnsafeRange(keyBucketName, ibytes, nil, 0)
var kv mvccpb.KeyValue
if err = kv.Unmarshal(vs[0]); err != nil {
plog.Fatalf("cannot unmarshal value: %v", err)
}
oldLease = lease.LeaseID(kv.Lease)
oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)})
}

ibytes := newRevBytes()
@@ -575,6 +600,8 @@ func (s *store) put(key, value []byte, leaseID lease.LeaseID) {
}

func (s *store) deleteRange(key, end []byte) int64 {
s.txnModify = true

rrev := s.currentRev.main
if s.currentRev.sub > 0 {
rrev += 1
@@ -615,17 +642,11 @@ func (s *store) delete(key []byte, rev revision) {
s.changes = append(s.changes, kv)
s.currentRev.sub += 1

ibytes = newRevBytes()
revToBytes(rev, ibytes)
_, vs := s.tx.UnsafeRange(keyBucketName, ibytes, nil, 0)
item := lease.LeaseItem{Key: string(key)}
leaseID := s.le.GetLease(item)

kv.Reset()
if err = kv.Unmarshal(vs[0]); err != nil {
plog.Fatalf("cannot unmarshal value: %v", err)
}

if lease.LeaseID(kv.Lease) != lease.NoLease {
err = s.le.Detach(lease.LeaseID(kv.Lease), []lease.LeaseItem{{Key: string(kv.Key)}})
if leaseID != lease.NoLease {
err = s.le.Detach(leaseID, []lease.LeaseItem{item})
if err != nil {
plog.Errorf("cannot detach %v", err)
}
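Editor's note: the restore() hunk replays revisions into a plain map first and bulk-inserts into the tree index afterwards, trading one ordered insert per distinct key for O(1) map lookups during replay. A simplified sketch of that two-phase shape, with the types trimmed down to the bare minimum (all names illustrative):

package main

import "fmt"

type keyIndex struct {
	key  string
	revs []int64 // revisions observed for this key, in replay order
}

type record struct {
	key string
	rev int64
}

// restore gathers per-key state in an unordered map (phase 1), then hands
// each distinct key to the sorted index exactly once (phase 2). The sorted
// index is modeled as a map here; in mvcc it is a B-tree and phase 2 is one
// ReplaceOrInsert per key instead of one per revision.
func restore(records []record) map[string]*keyIndex {
	unordered := make(map[string]*keyIndex, len(records))
	for _, r := range records {
		ki, ok := unordered[r.key]
		if !ok {
			ki = &keyIndex{key: r.key}
			unordered[r.key] = ki
		}
		ki.revs = append(ki.revs, r.rev)
	}
	tree := make(map[string]*keyIndex, len(unordered))
	for k, ki := range unordered {
		tree[k] = ki // phase 2: single insert per key
	}
	return tree
}

func main() {
	t := restore([]record{{"a", 1}, {"b", 2}, {"a", 3}})
	fmt.Println(len(t), t["a"].revs) // 2 [1 3]
}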
160
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
generated
vendored
@@ -31,7 +31,9 @@ var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Event_EventType int32

@@ -103,91 +105,91 @@ func init() {
proto.RegisterType((*Event)(nil), "mvccpb.Event")
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
}
func (m *KeyValue) Marshal() (data []byte, err error) {
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return data[:n], nil
return dAtA[:n], nil
}

func (m *KeyValue) MarshalTo(data []byte) (int, error) {
func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
data[i] = 0xa
dAtA[i] = 0xa
i++
i = encodeVarintKv(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if m.CreateRevision != 0 {
data[i] = 0x10
dAtA[i] = 0x10
i++
i = encodeVarintKv(data, i, uint64(m.CreateRevision))
i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
}
if m.ModRevision != 0 {
data[i] = 0x18
dAtA[i] = 0x18
i++
i = encodeVarintKv(data, i, uint64(m.ModRevision))
i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
}
if m.Version != 0 {
data[i] = 0x20
dAtA[i] = 0x20
i++
i = encodeVarintKv(data, i, uint64(m.Version))
i = encodeVarintKv(dAtA, i, uint64(m.Version))
}
if len(m.Value) > 0 {
data[i] = 0x2a
dAtA[i] = 0x2a
i++
i = encodeVarintKv(data, i, uint64(len(m.Value)))
i += copy(data[i:], m.Value)
i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
i += copy(dAtA[i:], m.Value)
}
if m.Lease != 0 {
data[i] = 0x30
dAtA[i] = 0x30
i++
i = encodeVarintKv(data, i, uint64(m.Lease))
i = encodeVarintKv(dAtA, i, uint64(m.Lease))
}
return i, nil
}

func (m *Event) Marshal() (data []byte, err error) {
func (m *Event) Marshal() (dAtA []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return data[:n], nil
return dAtA[:n], nil
}

func (m *Event) MarshalTo(data []byte) (int, error) {
func (m *Event) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Type != 0 {
data[i] = 0x8
dAtA[i] = 0x8
i++
i = encodeVarintKv(data, i, uint64(m.Type))
i = encodeVarintKv(dAtA, i, uint64(m.Type))
}
if m.Kv != nil {
data[i] = 0x12
dAtA[i] = 0x12
i++
i = encodeVarintKv(data, i, uint64(m.Kv.Size()))
n1, err := m.Kv.MarshalTo(data[i:])
i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
n1, err := m.Kv.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.PrevKv != nil {
data[i] = 0x1a
dAtA[i] = 0x1a
i++
i = encodeVarintKv(data, i, uint64(m.PrevKv.Size()))
n2, err := m.PrevKv.MarshalTo(data[i:])
i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
n2, err := m.PrevKv.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
@@ -196,31 +198,31 @@ func (m *Event) MarshalTo(data []byte) (int, error) {
return i, nil
}

func encodeFixed64Kv(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Kv(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintKv(data []byte, offset int, v uint64) int {
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *KeyValue) Size() (n int) {
@@ -279,8 +281,8 @@ func sovKv(x uint64) (n int) {
func sozKv(x uint64) (n int) {
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *KeyValue) Unmarshal(data []byte) error {
l := len(data)
func (m *KeyValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -292,7 +294,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -320,7 +322,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -334,7 +336,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
@@ -351,7 +353,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.CreateRevision |= (int64(b) & 0x7F) << shift
if b < 0x80 {
@@ -370,7 +372,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.ModRevision |= (int64(b) & 0x7F) << shift
if b < 0x80 {
@@ -389,7 +391,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Version |= (int64(b) & 0x7F) << shift
if b < 0x80 {
@@ -408,7 +410,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -422,7 +424,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
@@ -439,7 +441,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Lease |= (int64(b) & 0x7F) << shift
if b < 0x80 {
@@ -448,7 +450,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
}
default:
iNdEx = preIndex
skippy, err := skipKv(data[iNdEx:])
skippy, err := skipKv(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -467,8 +469,8 @@ func (m *KeyValue) Unmarshal(data []byte) error {
}
return nil
}
func (m *Event) Unmarshal(data []byte) error {
l := len(data)
func (m *Event) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -480,7 +482,7 @@ func (m *Event) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -508,7 +510,7 @@ func (m *Event) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Type |= (Event_EventType(b) & 0x7F) << shift
if b < 0x80 {
@@ -527,7 +529,7 @@ func (m *Event) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -544,7 +546,7 @@ func (m *Event) Unmarshal(data []byte) error {
if m.Kv == nil {
m.Kv = &KeyValue{}
}
if err := m.Kv.Unmarshal(data[iNdEx:postIndex]); err != nil {
if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -560,7 +562,7 @@ func (m *Event) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -577,13 +579,13 @@ func (m *Event) Unmarshal(data []byte) error {
if m.PrevKv == nil {
m.PrevKv = &KeyValue{}
}
if err := m.PrevKv.Unmarshal(data[iNdEx:postIndex]); err != nil {
if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipKv(data[iNdEx:])
skippy, err := skipKv(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -602,8 +604,8 @@ func (m *Event) Unmarshal(data []byte) error {
}
return nil
}
func skipKv(data []byte) (n int, err error) {
l := len(data)
func skipKv(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
@@ -614,7 +616,7 @@ func skipKv(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -632,7 +634,7 @@ func skipKv(data []byte) (n int, err error) {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
if dAtA[iNdEx-1] < 0x80 {
break
}
}
@@ -649,7 +651,7 @@ func skipKv(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -672,7 +674,7 @@ func skipKv(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -683,7 +685,7 @@ func skipKv(data []byte) (n int, err error) {
if innerWireType == 4 {
break
}
next, err := skipKv(data[start:])
next, err := skipKv(dAtA[start:])
if err != nil {
return 0, err
}
@@ -707,6 +709,8 @@ var (
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }

var fileDescriptorKv = []byte{
// 303 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
1
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
generated
vendored
@@ -43,6 +43,7 @@ message Event {
// A DELETE/EXPIRE event contains the deleted key with
// its modification revision set to the revision of deletion.
KeyValue kv = 2;

// prev_kv holds the key-value pair before the event happens.
KeyValue prev_kv = 3;
}
63
vendor/github.com/coreos/etcd/mvcc/watchable_store.go
generated
vendored
@@ -35,7 +35,7 @@ const (
)

type watchable interface {
watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse) (*watcher, cancelFunc)
watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
progress(w *watcher)
rev() int64
}
@@ -185,7 +185,7 @@ func (s *watchableStore) NewWatchStream() WatchStream {
}
}

func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse) (*watcher, cancelFunc) {
func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
s.mu.Lock()
defer s.mu.Unlock()

@@ -195,6 +195,7 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c
minRev: startRev,
id: id,
ch: ch,
fcs: fcs,
}

s.store.mu.Lock()
@@ -325,10 +326,9 @@ func (s *watchableStore) moveVictims() (moved int) {
for w, eb := range wb {
// watcher has observed the store up to, but not including, w.minRev
rev := w.minRev - 1
select {
case w.ch <- WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}:
if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
pendingEventsGauge.Add(float64(len(eb.evs)))
default:
} else {
if newVictim == nil {
newVictim = make(watcherBatch)
}
@@ -419,10 +419,9 @@ func (s *watchableStore) syncWatchers() {
w.minRev = eb.moreRev
}

select {
case w.ch <- WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}:
if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
pendingEventsGauge.Add(float64(len(eb.evs)))
default:
} else {
if victims == nil {
victims = make(watcherBatch)
}
@@ -480,10 +479,10 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
if eb.revs != 1 {
plog.Panicf("unexpected multiple revisions in notification")
}
select {
case w.ch <- WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}:

if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
pendingEventsGauge.Add(float64(len(eb.evs)))
default:
} else {
// move slow watcher to victims
w.minRev = rev + 1
if victim == nil {
@@ -516,12 +515,9 @@ func (s *watchableStore) progress(w *watcher) {
defer s.mu.Unlock()

if _, ok := s.synced.watchers[w]; ok {
select {
case w.ch <- WatchResponse{WatchID: w.id, Revision: s.rev()}:
default:
// If the ch is full, this watcher is receiving events.
// We do not need to send progress at all.
}
w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
// If the ch is full, this watcher is receiving events.
// We do not need to send progress at all.
}
}

@@ -542,7 +538,40 @@ type watcher struct {
minRev int64
id WatchID

fcs []FilterFunc
// a chan to send out the watch response.
// The chan might be shared with other watchers.
ch chan<- WatchResponse
}

func (w *watcher) send(wr WatchResponse) bool {
progressEvent := len(wr.Events) == 0

if len(w.fcs) != 0 {
ne := make([]mvccpb.Event, 0, len(wr.Events))
for i := range wr.Events {
filtered := false
for _, filter := range w.fcs {
if filter(wr.Events[i]) {
filtered = true
break
}
}
if !filtered {
ne = append(ne, wr.Events[i])
}
}
wr.Events = ne
}

// if all events are filtered out, we should send nothing.
if !progressEvent && len(wr.Events) == 0 {
return true
}
select {
case w.ch <- wr:
return true
default:
return false
}
}
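Editor's note: all three send sites above now funnel through watcher.send, which uses Go's select-with-default so a slow receiver never blocks the store; a false return moves the watcher onto the victim list. A tiny standalone illustration of that non-blocking send idiom:

package main

import "fmt"

// trySend delivers v on ch only if buffer space is available.
func trySend(ch chan int, v int) bool {
	select {
	case ch <- v:
		return true
	default:
		return false // receiver is slow; caller must queue v elsewhere
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(trySend(ch, 1)) // true: buffer had room
	fmt.Println(trySend(ch, 2)) // false: buffer full, nothing was sent
}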
9
vendor/github.com/coreos/etcd/mvcc/watcher.go
generated
vendored
@@ -28,6 +28,9 @@ var (

type WatchID int64

// FilterFunc returns true if the given event should be filtered out.
type FilterFunc func(e mvccpb.Event) bool

type WatchStream interface {
// Watch creates a watcher. The watcher watches the events happening or
// happened on the given key or range [key, end) from the given startRev.
@@ -38,7 +41,7 @@ type WatchStream interface {
// The returned `id` is the ID of this watcher. It appears as WatchID
// in events that are sent to the created watcher through stream channel.
//
Watch(key, end []byte, startRev int64) WatchID
Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID

// Chan returns a chan. All watch response will be sent to the returned chan.
Chan() <-chan WatchResponse
@@ -96,7 +99,7 @@ type watchStream struct {

// Watch creates a new watcher in the stream and returns its WatchID.
// TODO: return error if ws is closed?
func (ws *watchStream) Watch(key, end []byte, startRev int64) WatchID {
func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID {
// prevent wrong range where key >= end lexicographically
// watch request with 'WithFromKey' has empty-byte range end
if len(end) != 0 && bytes.Compare(key, end) != -1 {
@@ -112,7 +115,7 @@ func (ws *watchStream) Watch(key, end []byte, startRev int64) WatchID {
id := ws.nextID
ws.nextID++

w, c := ws.watchable.watch(key, end, startRev, id, ws.ch)
w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)

ws.cancels[id] = c
ws.watchers[id] = w
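Editor's note: a hedged usage sketch for the new variadic filters on Watch. The stream construction is elided; only the filter wiring is the point, and a filter returning true drops the event before it reaches the channel.

package sketch

import "github.com/coreos/etcd/mvcc/mvccpb"

// dropPuts is a FilterFunc: returning true filters the event out, so a
// watcher registered with it only observes DELETE events.
func dropPuts(e mvccpb.Event) bool { return e.Type == mvccpb.PUT }

// Wiring, assuming a watchable store s is already in hand:
//	ws := s.NewWatchStream()
//	id := ws.Watch([]byte("foo"), nil, 1, dropPuts)
//	_ = id // responses on ws.Chan() now carry no PUT events; responses
//	       // whose events are all filtered are suppressed entirely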
4
vendor/github.com/coreos/etcd/mvcc/watcher_group.go
generated
vendored
@@ -78,6 +78,10 @@ func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
// newWatcherBatch maps watchers to their matched events. It enables quick
// events look up by watcher.
func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
if len(wg.watchers) == 0 {
return nil
}

wb := make(watcherBatch)
for _, ev := range evs {
for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
3
vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
generated
vendored
@@ -447,6 +447,9 @@ func (ivt *IntervalTree) Contains(iv Interval) bool {

// Stab returns a slice with all elements in the tree intersecting the interval.
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
if ivt.count == 0 {
return nil
}
f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
ivt.Visit(iv, f)
return ivs
16
vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cpuutil provides facilities for detecting cpu-specific features.
package cpuutil
36
vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cpuutil

import (
"encoding/binary"
"unsafe"
)

const intWidth int = int(unsafe.Sizeof(0))

var byteOrder binary.ByteOrder

// ByteOrder returns the byte order for the CPU's native endianness.
func ByteOrder() binary.ByteOrder { return byteOrder }

func init() {
var i int = 0x1
if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
byteOrder = binary.BigEndian
} else {
byteOrder = binary.LittleEndian
}
}
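Editor's note: the new package detects native endianness once at init by inspecting the low byte of an integer. A short usage sketch; it just asks the detected order to encode an integer:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/cpuutil"
)

func main() {
	buf := make([]byte, 4)
	cpuutil.ByteOrder().PutUint32(buf, 0x01020304)
	// On little-endian CPUs (x86/amd64) this prints [4 3 2 1];
	// on big-endian CPUs it prints [1 2 3 4].
	fmt.Println(buf)
}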
6
vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
generated
vendored
@@ -19,7 +19,7 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"

"github.com/coreos/pkg/capnslog"
@@ -33,13 +33,13 @@ const (
)

var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
)

// IsDirWriteable checks if dir is writable by writing and removing a file
// to dir. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
f := path.Join(dir, ".touch")
f := filepath.Join(dir, ".touch")
if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
return err
}
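Editor's note: this vendor bump systematically replaces path.Join with filepath.Join for filesystem paths. The distinction is that package path always uses '/', while filepath uses the host OS separator; a one-screen demonstration:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	fmt.Println(path.Join("dir", "db"))     // always "dir/db", even on Windows
	fmt.Println(filepath.Join("dir", "db")) // "dir\db" on Windows, "dir/db" elsewhere
}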
4
vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
generated
vendored
@@ -16,7 +16,7 @@ package fileutil

import (
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
@@ -45,7 +45,7 @@ func purgeFile(dirname string, suffix string, max uint, interval time.Duration,
sort.Strings(newfnames)
fnames = newfnames
for len(newfnames) > int(max) {
f := path.Join(dirname, newfnames[0])
f := filepath.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
if err != nil {
break
2
vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
generated
vendored
@@ -13,7 +13,7 @@ import (
"net/http"
)

func RequestCanceler(rt http.RoundTripper, req *http.Request) func() {
func RequestCanceler(req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
6
vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s
generated
vendored
Normal file
@@ -0,0 +1,6 @@
// Copyright (C) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

// This file is intentionally empty.
// It's a workaround for https://github.com/golang/go/issues/15006
26
vendor/github.com/coreos/etcd/pkg/monotime/monotime.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package monotime

import (
"time"
)

// Time represents a point in monotonic time
type Time uint64

func (t Time) Add(d time.Duration) Time {
return Time(uint64(t) + uint64(d.Nanoseconds()))
}
24
vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Copyright (C) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

// Package monotime provides a fast monotonic clock source.
package monotime

import (
_ "unsafe" // required to use //go:linkname
)

//go:noescape
//go:linkname nanotime runtime.nanotime
func nanotime() int64

// Now returns the current time in nanoseconds from a monotonic clock.
// The time returned is based on some arbitrary platform-specific point in the
// past. The time returned is guaranteed to increase monotonically at a
// constant rate, unlike time.Now() from the Go standard library, which may
// slow down, speed up, jump forward or backward, due to NTP activity or leap
// seconds.
func Now() Time {
	return Time(nanotime())
}
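Editor's note: a hedged usage sketch for the monotonic clock introduced above; intervals measured this way are immune to wall-clock steps. Since monotime.Time is an unsigned nanosecond count, a difference converts directly to time.Duration:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/monotime"
)

func main() {
	start := monotime.Now()
	time.Sleep(50 * time.Millisecond)
	elapsed := time.Duration(monotime.Now() - start)
	fmt.Println("slept roughly", elapsed) // unaffected by NTP adjustments

	deadline := start.Add(time.Second) // a fixed point one second after start
	fmt.Println("past deadline?", monotime.Now() > deadline)
}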
32
vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go
generated
vendored
32
vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go
generated
vendored
@@ -43,17 +43,24 @@ func RecoverPort(port int) error {
|
||||
|
||||
// SetLatency adds latency in millisecond scale with random variations.
|
||||
func SetLatency(ms, rv int) error {
|
||||
ifces, err := GetDefaultInterfaces()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rv > ms {
|
||||
rv = 1
|
||||
}
|
||||
cmdStr := fmt.Sprintf("sudo tc qdisc add dev eth0 root netem delay %dms %dms distribution normal", ms, rv)
|
||||
_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
if err != nil {
|
||||
// the rule has already been added. Overwrite it.
|
||||
cmdStr = fmt.Sprintf("sudo tc qdisc change dev eth0 root netem delay %dms %dms distribution normal", ms, rv)
|
||||
for ifce := range ifces {
|
||||
cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
|
||||
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
if err != nil {
|
||||
return err
|
||||
// the rule has already been added. Overwrite it.
|
||||
cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
|
||||
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -61,6 +68,15 @@ func SetLatency(ms, rv int) error {
// RemoveLatency resets latency configurations.
func RemoveLatency() error {
	_, err := exec.Command("/bin/sh", "-c", "sudo tc qdisc del dev eth0 root netem").Output()
	return err
	ifces, err := GetDefaultInterfaces()
	if err != nil {
		return err
	}
	for ifce := range ifces {
		_, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
		if err != nil {
			return err
		}
	}
	return nil
}
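Both helpers shell out to tc qdisc ... netem, so they require Linux, the tc binary, and sudo. A hedged sketch of how a failure-injection test might drive them; the test name, skip condition, and durations are assumptions for illustration, not part of the vendored package:

package netutil_test

import (
	"testing"

	"github.com/coreos/etcd/pkg/netutil"
)

func TestWithInjectedLatency(t *testing.T) {
	if testing.Short() {
		t.Skip("requires root and the tc binary")
	}
	// ~200ms delay with 50ms of normally distributed jitter on every
	// default routable interface.
	if err := netutil.SetLatency(200, 50); err != nil {
		t.Fatalf("SetLatency: %v", err)
	}
	defer func() {
		if err := netutil.RemoveLatency(); err != nil {
			t.Errorf("RemoveLatency: %v", err)
		}
	}()
	// ... exercise cluster behavior under injected latency here ...
}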
60
vendor/github.com/coreos/etcd/pkg/netutil/netutil.go
generated
vendored
@@ -20,22 +20,27 @@ import (
	"net/url"
	"reflect"
	"sort"
	"time"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/pkg/capnslog"
)

var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "netutil")
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")

	// indirection for testing
	resolveTCPAddr = net.ResolveTCPAddr
)

const retryInterval = time.Second

// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
// are resolved.
func resolveTCPAddrs(urls [][]url.URL) ([][]url.URL, error) {
func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
	newurls := make([][]url.URL, 0)
	for _, us := range urls {
		nus := make([]url.URL, len(us))
@@ -47,37 +52,52 @@ func resolveTCPAddrs(urls [][]url.URL) ([][]url.URL, error) {
			nus[i] = *nu
		}
		for i, u := range nus {
			host, _, err := net.SplitHostPort(u.Host)
			h, err := resolveURL(ctx, u)
			if err != nil {
				plog.Errorf("could not parse url %s during tcp resolving", u.Host)
				return nil, err
			}
			if host == "localhost" {
				continue
			if h != "" {
				nus[i].Host = h
			}
			if net.ParseIP(host) != nil {
				continue
			}
			tcpAddr, err := resolveTCPAddr("tcp", u.Host)
			if err != nil {
				plog.Errorf("could not resolve host %s", u.Host)
				return nil, err
			}
			plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
			nus[i].Host = tcpAddr.String()
		}
		newurls = append(newurls, nus)
	}
	return newurls, nil
}

func resolveURL(ctx context.Context, u url.URL) (string, error) {
	for ctx.Err() == nil {
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			plog.Errorf("could not parse url %s during tcp resolving", u.Host)
			return "", err
		}
		if host == "localhost" || net.ParseIP(host) != nil {
			return "", nil
		}
		tcpAddr, err := resolveTCPAddr("tcp", u.Host)
		if err == nil {
			plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
			return tcpAddr.String(), nil
		}
		plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
		select {
		case <-ctx.Done():
			plog.Errorf("could not resolve host %s", u.Host)
			return "", err
		case <-time.After(retryInterval):
		}
	}
	return "", ctx.Err()
}
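The select on ctx.Done() versus time.After(retryInterval) in resolveURL is a reusable shape: retry a fallible operation at a fixed interval until it succeeds or the caller cancels. A generic sketch of the same pattern, assuming an arbitrary op func() error; the package and function names are illustrative, not etcd's:

package retryutil

import (
	"time"

	"golang.org/x/net/context"
)

// retryUntil retries op every interval until it succeeds or ctx is done,
// mirroring the loop in resolveURL above.
func retryUntil(ctx context.Context, interval time.Duration, op func() error) error {
	for ctx.Err() == nil {
		err := op()
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return err // surface the last failure, as resolveURL does
		case <-time.After(interval):
		}
	}
	return ctx.Err()
}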
// urlsEqual checks equality of url.URLs between two arrays.
// This check passes even if one URL is in hostname form and the other is in
// IP-address form.
func urlsEqual(a []url.URL, b []url.URL) bool {
func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) bool {
	if len(a) != len(b) {
		return false
	}
	urls, err := resolveTCPAddrs([][]url.URL{a, b})
	urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b})
	if err != nil {
		return false
	}
@@ -93,7 +113,7 @@ func urlsEqual(a []url.URL, b []url.URL) bool {
	return true
}

func URLStringsEqual(a []string, b []string) bool {
func URLStringsEqual(ctx context.Context, a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
@@ -114,7 +134,7 @@ func URLStringsEqual(a []string, b []string) bool {
		urlsB = append(urlsB, *u)
	}

	return urlsEqual(urlsA, urlsB)
	return urlsEqual(ctx, urlsA, urlsB)
}

func IsNetworkTimeoutError(err error) bool {
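A caller-side sketch of the new context-aware signature, bounding the DNS retries inside URLStringsEqual with a deadline; the URLs and the 30-second timeout are assumptions for illustration:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	// Without a deadline, resolveURL would keep retrying every
	// retryInterval until the context is cancelled.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	a := []string{"http://infra0.example.com:2380"}
	b := []string{"http://10.0.0.1:2380"}
	fmt.Println(netutil.URLStringsEqual(ctx, a, b))
}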
33
vendor/github.com/coreos/etcd/pkg/netutil/routes.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !linux

package netutil

import (
	"fmt"
	"runtime"
)

// GetDefaultHost fetches a resolvable name that corresponds
// to the machine's default routable interface.
func GetDefaultHost() (string, error) {
	return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}

// GetDefaultInterfaces fetches the device name of the default routable interface.
func GetDefaultInterfaces() (map[string]uint8, error) {
	return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}
250
vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package netutil

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"net"
	"sort"
	"syscall"

	"github.com/coreos/etcd/pkg/cpuutil"
)

var errNoDefaultRoute = fmt.Errorf("could not find default route")
var errNoDefaultHost = fmt.Errorf("could not find default host")
var errNoDefaultInterface = fmt.Errorf("could not find default interface")

// GetDefaultHost obtains the first IP address of the machine from the routing table and returns the IP address as string.
// An IPv4 address is preferred to an IPv6 address for backward compatibility.
func GetDefaultHost() (string, error) {
	rmsgs, rerr := getDefaultRoutes()
	if rerr != nil {
		return "", rerr
	}

	// prioritize IPv4
	if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
		if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
			return host, err
		}
		delete(rmsgs, syscall.AF_INET)
	}

	// sort so choice is deterministic
	var families []int
	for family := range rmsgs {
		families = append(families, int(family))
	}
	sort.Ints(families)

	for _, f := range families {
		family := uint8(f)
		if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
			return host, err
		}
	}

	return "", errNoDefaultHost
}

func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
	host, oif, err := parsePREFSRC(rmsg)
	if host != "" || err != nil {
		return host, err
	}

	// prefsrc not detected, fall back to getting address from iface
	ifmsg, ierr := getIfaceAddr(oif, family)
	if ierr != nil {
		return "", ierr
	}

	attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
	if aerr != nil {
		return "", aerr
	}

	for _, attr := range attrs {
		// search for RTA_DST because ipv6 doesn't have RTA_SRC
		if attr.Attr.Type == syscall.RTA_DST {
			return net.IP(attr.Value).String(), nil
		}
	}

	return "", nil
}

func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
	if err != nil {
		return nil, err
	}

	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}

	routes := make(map[uint8]*syscall.NetlinkMessage)
	rtmsg := syscall.RtMsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWROUTE {
			continue
		}
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
			continue
		}
		if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
			// zero-length Dst_len implies default route
			msg := m
			routes[rtmsg.Family] = &msg
		}
	}

	if len(routes) > 0 {
		return routes, nil
	}

	return nil, errNoDefaultRoute
}

// Used to get the address of an interface.
func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
	if err != nil {
		return nil, err
	}

	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}

	ifaddrmsg := syscall.IfAddrmsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWADDR {
			continue
		}
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
			continue
		}
		if ifaddrmsg.Index == idx {
			return &m, nil
		}
	}

	return nil, fmt.Errorf("could not find address for interface index %v", idx)
}

// Used to get the name of an interface.
func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
	if err != nil {
		return nil, err
	}

	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}

	ifinfomsg := syscall.IfInfomsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWLINK {
			continue
		}
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
			continue
		}
		if ifinfomsg.Index == int32(idx) {
			return &m, nil
		}
	}

	return nil, fmt.Errorf("could not find link for interface index %v", idx)
}

// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families.
func GetDefaultInterfaces() (map[string]uint8, error) {
	interfaces := make(map[string]uint8)
	rmsgs, rerr := getDefaultRoutes()
	if rerr != nil {
		return interfaces, rerr
	}

	for family, rmsg := range rmsgs {
		_, oif, err := parsePREFSRC(rmsg)
		if err != nil {
			return interfaces, err
		}

		ifmsg, ierr := getIfaceLink(oif)
		if ierr != nil {
			return interfaces, ierr
		}

		attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
		if aerr != nil {
			return interfaces, aerr
		}

		for _, attr := range attrs {
			if attr.Attr.Type == syscall.IFLA_IFNAME {
				// key is an interface name
				// possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
				interfaces[string(attr.Value[:len(attr.Value)-1])] += family
			}
		}
	}
	if len(interfaces) > 0 {
		return interfaces, nil
	}
	return interfaces, errNoDefaultInterface
}

// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
	var attrs []syscall.NetlinkRouteAttr
	attrs, err = syscall.ParseNetlinkRouteAttr(m)
	if err != nil {
		return "", 0, err
	}

	for _, attr := range attrs {
		if attr.Attr.Type == syscall.RTA_PREFSRC {
			host = net.IP(attr.Value).String()
		}
		if attr.Attr.Type == syscall.RTA_OIF {
			oif = cpuutil.ByteOrder().Uint32(attr.Value)
		}
		if host != "" && oif != uint32(0) {
			break
		}
	}

	if oif == 0 {
		err = errNoDefaultRoute
	}
	return
}
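All of the netlink plumbing above surfaces through just two exported calls. A minimal Linux-only sketch of consuming them; the error handling and printing are illustrative, not etcd's own usage:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	// First IP address on the default route, IPv4 preferred.
	host, err := netutil.GetDefaultHost()
	if err != nil {
		log.Fatal(err)
	}
	// Interface name -> summed address families (2 = IPv4, 10 = IPv6,
	// 12 = dualstack), per the comment in GetDefaultInterfaces.
	ifces, err := netutil.GetDefaultInterfaces()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host, ifces)
}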
2
vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
generated
vendored
@@ -18,7 +18,7 @@ package pbutil
import "github.com/coreos/pkg/capnslog"

var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags")
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")
)

type Marshaler interface {
2
vendor/github.com/coreos/etcd/pkg/testutil/leak.go
generated
vendored
@@ -106,6 +106,8 @@ func interestingGoroutines() (gs []string) {
	}
	stack := strings.TrimSpace(sl[1])
	if stack == "" ||
		strings.Contains(stack, "created by os/signal.init") ||
		strings.Contains(stack, "runtime/panic.go") ||
		strings.Contains(stack, "created by testing.RunTests") ||
		strings.Contains(stack, "testing.Main(") ||
		strings.Contains(stack, "runtime.goexit") ||
Some files were not shown because too many files have changed in this diff