Update cadvisor and hcsshim versions
Signed-off-by: Kirtana Ashok <kiashok@microsoft.com>
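(The version bumps appear to pull in a newer vendored github.com/containerd/ttrpc; the hunks below show the resulting changes to the vendored client.go.)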
vendor/github.com/containerd/ttrpc/client.go | 86 (generated) (vendored)
--- a/vendor/github.com/containerd/ttrpc/client.go
+++ b/vendor/github.com/containerd/ttrpc/client.go
@@ -27,7 +27,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/sirupsen/logrus"
+	"github.com/containerd/log"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/proto"
@@ -71,6 +71,42 @@ func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts {
 	}
 }
 
+// WithChainUnaryClientInterceptor sets the provided chain of client interceptors
+func WithChainUnaryClientInterceptor(interceptors ...UnaryClientInterceptor) ClientOpts {
+	return func(c *Client) {
+		if len(interceptors) == 0 {
+			return
+		}
+		if c.interceptor != nil {
+			interceptors = append([]UnaryClientInterceptor{c.interceptor}, interceptors...)
+		}
+		c.interceptor = func(
+			ctx context.Context,
+			req *Request,
+			reply *Response,
+			info *UnaryClientInfo,
+			final Invoker,
+		) error {
+			return interceptors[0](ctx, req, reply, info,
+				chainUnaryInterceptors(interceptors[1:], final, info))
+		}
+	}
+}
+
+func chainUnaryInterceptors(interceptors []UnaryClientInterceptor, final Invoker, info *UnaryClientInfo) Invoker {
+	if len(interceptors) == 0 {
+		return final
+	}
+	return func(
+		ctx context.Context,
+		req *Request,
+		reply *Response,
+	) error {
+		return interceptors[0](ctx, req, reply, info,
+			chainUnaryInterceptors(interceptors[1:], final, info))
+	}
+}
+
 // NewClient creates a new ttrpc client using the given connection
 func NewClient(conn net.Conn, opts ...ClientOpts) *Client {
 	ctx, cancel := context.WithCancel(context.Background())
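A minimal usage sketch of the new chaining option, for reviewers unfamiliar with it; the logging interceptor, socket path, and main function below are illustrative, not part of this commit:

package main

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

// logCalls is a hypothetical interceptor: it logs each unary call,
// then hands control to the next invoker in the chain.
func logCalls(ctx context.Context, req *ttrpc.Request, resp *ttrpc.Response,
	info *ttrpc.UnaryClientInfo, next ttrpc.Invoker) error {
	log.Printf("ttrpc call: %s/%s", req.Service, req.Method)
	return next(ctx, req, resp)
}

func main() {
	conn, err := net.Dial("unix", "/run/example/ttrpc.sock") // hypothetical socket
	if err != nil {
		log.Fatal(err)
	}
	// Interceptors run left to right; each one wraps the rest of the chain.
	client := ttrpc.NewClient(conn, ttrpc.WithChainUnaryClientInterceptor(logCalls))
	defer client.Close()
}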
@@ -85,13 +121,16 @@ func NewClient(conn net.Conn, opts ...ClientOpts) *Client {
 		ctx:             ctx,
 		userCloseFunc:   func() {},
 		userCloseWaitCh: make(chan struct{}),
-		interceptor:     defaultClientInterceptor,
 	}
 
 	for _, o := range opts {
 		o(c)
 	}
 
+	if c.interceptor == nil {
+		c.interceptor = defaultClientInterceptor
+	}
+
 	go c.run()
 	return c
 }
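Reviewer note: the default interceptor is now applied after the options run, rather than in the struct literal, presumably so the nil check in WithChainUnaryClientInterceptor can distinguish an explicitly configured interceptor from the default before prepending it to the chain.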
@@ -286,7 +325,7 @@ func (c *Client) Close() error {
 	return nil
 }
 
-// UserOnCloseWait is used to blocks untils the user's on-close callback
+// UserOnCloseWait is used to block until the user's on-close callback
 // finishes.
 func (c *Client) UserOnCloseWait(ctx context.Context) error {
 	select {
@@ -329,7 +368,7 @@ func (c *Client) receiveLoop() error {
 		sid := streamID(msg.header.StreamID)
 		s := c.getStream(sid)
 		if s == nil {
-			logrus.WithField("stream", sid).Errorf("ttrpc: received message on inactive stream")
+			log.G(c.ctx).WithField("stream", sid).Error("ttrpc: received message on inactive stream")
 			continue
 		}
 
@@ -337,7 +376,7 @@ func (c *Client) receiveLoop() error {
 			s.closeWithError(err)
 		} else {
 			if err := s.receive(c.ctx, msg); err != nil {
-				logrus.WithError(err).WithField("stream", sid).Errorf("ttrpc: failed to handle message")
+				log.G(c.ctx).WithFields(log.Fields{"error": err, "stream": sid}).Error("ttrpc: failed to handle message")
 			}
 		}
 	}
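Both logging hunks make the same substitution: the global logrus logger becomes containerd's context-scoped logger. A minimal sketch of the pattern, with an illustrative field and message:

package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	// Attach an entry carrying a per-component field to the context once...
	ctx := log.WithLogger(context.Background(),
		log.G(context.Background()).WithField("component", "ttrpc-client"))

	// ...then every log.G(ctx) call picks it up; no global logrus state needed.
	log.G(ctx).WithFields(log.Fields{"stream": 3}).Error("ttrpc: received message on inactive stream")
}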
@@ -347,25 +386,44 @@ func (c *Client) receiveLoop() error {
 // createStream creates a new stream and registers it with the client
 // Introduce stream types for multiple or single response
 func (c *Client) createStream(flags uint8, b []byte) (*stream, error) {
-	c.streamLock.Lock()
+	// sendLock must be held across both allocation of the stream ID and sending it across the wire.
+	// This ensures that new stream IDs sent on the wire are always increasing, which is a
+	// requirement of the TTRPC protocol.
+	// This use of sendLock could be split into another mutex that covers stream creation + first send,
+	// and just use sendLock to guard writing to the wire, but for now it seems simpler to have fewer mutexes.
+	c.sendLock.Lock()
+	defer c.sendLock.Unlock()
 
 	// Check if closed since lock acquired to prevent adding
 	// anything after cleanup completes
 	select {
 	case <-c.ctx.Done():
-		c.streamLock.Unlock()
 		return nil, ErrClosed
 	default:
 	}
 
-	// Stream ID should be allocated at same time
-	s := newStream(c.nextStreamID, c)
-	c.streams[s.id] = s
-	c.nextStreamID = c.nextStreamID + 2
+	var s *stream
+	if err := func() error {
+		// In the future this could be replaced with a sync.Map instead of streamLock+map.
+		c.streamLock.Lock()
+		defer c.streamLock.Unlock()
 
-	c.sendLock.Lock()
-	defer c.sendLock.Unlock()
-	c.streamLock.Unlock()
+		// Check if closed since lock acquired to prevent adding
+		// anything after cleanup completes
+		select {
+		case <-c.ctx.Done():
+			return ErrClosed
+		default:
+		}
+
+		s = newStream(c.nextStreamID, c)
+		c.streams[s.id] = s
+		c.nextStreamID = c.nextStreamID + 2
+
+		return nil
+	}(); err != nil {
+		return nil, err
+	}
 
 	if err := c.channel.send(uint32(s.id), messageTypeRequest, flags, b); err != nil {
 		return s, filterCloseErr(err)
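The new comment block states the invariant driving this rework: stream IDs must reach the wire in increasing order, so ID allocation and the first send now happen under the same lock. A toy sketch of that invariant, independent of ttrpc; the allocator type and all names are illustrative:

package main

import (
	"fmt"
	"sync"
)

// idAllocator is a toy stand-in for the client's nextStreamID bookkeeping.
type idAllocator struct {
	mu   sync.Mutex // plays the role of sendLock
	next uint32
}

// allocAndSend allocates the next stream ID and "sends" it while still
// holding the lock, so a later allocation can never reach the wire first.
func (a *idAllocator) allocAndSend() {
	a.mu.Lock()
	defer a.mu.Unlock()
	id := a.next
	a.next += 2 // client streams keep the same parity, as in the diff
	fmt.Println("sent stream", id)
}

func main() {
	a := &idAllocator{next: 1}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			a.allocAndSend()
		}()
	}
	wg.Wait() // goroutine order varies, but the printed IDs always increase
}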
||||