Updating to latest protos and device-management interface, releasing 2.0
Change-Id: I2d2ebf5b305d6d06b8d01c49d4d67e7ff050f5d4
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 8b10516..5cc3aed 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -25,6 +25,7 @@
"os"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/grpcutil"
)
// Logger is the global binary logger. It can be used to get binary logger for
@@ -39,6 +40,8 @@
// It is used to get a methodLogger for each individual method.
var binLogger Logger
+var grpclogLogger = grpclog.Component("binarylog")
+
// SetLogger sets the binary logger.
//
// Only call this at init time.
@@ -146,9 +149,9 @@
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
- s, m, err := parseMethodName(methodName)
+ s, m, err := grpcutil.ParseMethod(methodName)
if err != nil {
- grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+ grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
if ml, ok := l.methods[s+"/"+m]; ok {
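
The logging change above follows the per-component logger pattern used throughout this update: each internal package creates one logger with grpclog.Component (as on the added grpclogLogger line) and routes every message through it, so entries carry the component name. A minimal sketch of that pattern, using only the public grpclog API:

package main

import "google.golang.org/grpc/grpclog"

// One component logger per package; Infof/Warningf/Errorf on it prefix the
// component name to every message.
var logger = grpclog.Component("binarylog")

func main() {
	logger.Infof("failed to parse %q: %v", "/svc/method", "no leading slash")
}
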
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index be30d0e..d8f4e76 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -24,8 +24,6 @@
"regexp"
"strconv"
"strings"
-
- "google.golang.org/grpc/grpclog"
)
// NewLoggerFromConfigString reads the string and build a logger. It can be used
@@ -52,7 +50,7 @@
methods := strings.Split(s, ",")
for _, method := range methods {
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
- grpclog.Warningf("failed to parse binary log config: %v", err)
+ grpclogLogger.Warningf("failed to parse binary log config: %v", err)
return nil
}
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 160f6e8..0cdb418 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -27,7 +27,6 @@
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@@ -66,7 +65,7 @@
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
- sink: defaultSink, // TODO(blog): make it plugable.
+ sink: DefaultSink, // TODO(blog): make it pluggable.
}
}
@@ -219,12 +218,12 @@
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
@@ -259,12 +258,12 @@
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
@@ -315,7 +314,7 @@
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
- grpclog.Info("binarylogging: error in trailer is not a status error")
+ grpclogLogger.Info("binarylogging: error in trailer is not a status error")
}
var (
detailsBytes []byte
@@ -325,7 +324,7 @@
if stProto != nil && len(stProto.Details) != 0 {
detailsBytes, err = proto.Marshal(stProto)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
deleted file mode 100644
index 113d40c..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
- rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/binarylog/grpc_binarylog_v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
-popd
-rm -f ./grpc_binarylog_v1/*.pb.go
-cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
-
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index a2e7c34..c2fdd58 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -21,32 +21,23 @@
import (
"bufio"
"encoding/binary"
- "fmt"
"io"
- "io/ioutil"
"sync"
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
)
var (
- defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+ // DefaultSink is the sink where the logs will be written to. It's exported
+ // for the binarylog package to update.
+ DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
- if defaultSink != nil {
- defaultSink.Close()
- }
- defaultSink = s
-}
-
// Sink writes log entry into the binary log sink.
+//
+// Sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
type Sink interface {
// Write will be called to write the log entry into the sink.
//
@@ -67,7 +58,7 @@
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffering is done; Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
return &writerSink{out: w}
}
@@ -78,7 +69,8 @@
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
- grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+ grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+ return err
}
hdr := make([]byte, 4)
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@@ -93,25 +85,28 @@
func (ws *writerSink) Close() error { return nil }
-type bufWriteCloserSink struct {
- mu sync.Mutex
- closer io.Closer
- out *writerSink // out is built on buf.
- buf *bufio.Writer // buf is kept for flush.
+type bufferedSink struct {
+ mu sync.Mutex
+ closer io.Closer
+ out Sink // out is built on buf.
+ buf *bufio.Writer // buf is kept for flush.
+ flusherStarted bool
- writeStartOnce sync.Once
- writeTicker *time.Ticker
+ writeTicker *time.Ticker
+ done chan struct{}
}
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
- // Start the write loop when Write is called.
- fs.writeStartOnce.Do(fs.startFlushGoroutine)
+func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if !fs.flusherStarted {
+ // Start the write loop when Write is called.
+ fs.startFlushGoroutine()
+ fs.flusherStarted = true
+ }
if err := fs.out.Write(e); err != nil {
- fs.mu.Unlock()
return err
}
- fs.mu.Unlock()
return nil
}
@@ -119,44 +114,57 @@
bufFlushDuration = 60 * time.Second
)
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
fs.writeTicker = time.NewTicker(bufFlushDuration)
go func() {
- for range fs.writeTicker.C {
+ for {
+ select {
+ case <-fs.done:
+ return
+ case <-fs.writeTicker.C:
+ }
fs.mu.Lock()
- fs.buf.Flush()
+ if err := fs.buf.Flush(); err != nil {
+ grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+ }
fs.mu.Unlock()
}
}()
}
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
if fs.writeTicker != nil {
fs.writeTicker.Stop()
}
- fs.mu.Lock()
- fs.buf.Flush()
- fs.closer.Close()
- fs.out.Close()
- fs.mu.Unlock()
+ close(fs.done)
+ if err := fs.buf.Flush(); err != nil {
+ grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+ }
+ if err := fs.closer.Close(); err != nil {
+ grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+ }
+ if err := fs.out.Close(); err != nil {
+ grpclogLogger.Warningf("failed to close the Sink: %v", err)
+ }
return nil
}
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
bufW := bufio.NewWriter(o)
- return &bufWriteCloserSink{
+ return &bufferedSink{
closer: o,
out: newWriterSink(bufW),
buf: bufW,
+ done: make(chan struct{}),
}
}
-
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
- tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
- if err != nil {
- return nil, fmt.Errorf("failed to create temp file: %v", err)
- }
- return newBufWriteCloserSink(tempFile), nil
-}
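
The bufferedSink rewrite above replaces the sync.Once-guarded flush goroutine with a flusherStarted flag checked under the mutex, and adds a done channel so Close can stop the ticker loop instead of leaving the goroutine running forever. A standalone sketch of that flush-loop pattern (the type and field names here are illustrative, not the internal ones):

package main

import (
	"bufio"
	"fmt"
	"os"
	"sync"
	"time"
)

// bufferedWriter mirrors the bufferedSink shape: writes go through a
// bufio.Writer under a mutex, a ticker-driven goroutine flushes periodically,
// and a done channel lets Close terminate that goroutine.
type bufferedWriter struct {
	mu      sync.Mutex
	buf     *bufio.Writer
	ticker  *time.Ticker
	done    chan struct{}
	started bool
}

func (b *bufferedWriter) Write(p []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if !b.started {
		// Start the flush loop lazily, on the first Write.
		b.startFlusher()
		b.started = true
	}
	return b.buf.Write(p)
}

func (b *bufferedWriter) startFlusher() {
	b.ticker = time.NewTicker(time.Second)
	go func() {
		for {
			select {
			case <-b.done:
				return
			case <-b.ticker.C:
			}
			b.mu.Lock()
			b.buf.Flush()
			b.mu.Unlock()
		}
	}()
}

func (b *bufferedWriter) Close() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.ticker != nil {
		b.ticker.Stop()
	}
	close(b.done)
	return b.buf.Flush()
}

func main() {
	w := &bufferedWriter{buf: bufio.NewWriter(os.Stdout), done: make(chan struct{})}
	fmt.Fprintln(w, "buffered until Close or the next tick")
	w.Close()
}
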
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
deleted file mode 100644
index 15dc780..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "errors"
- "strings"
-)
-
-// parseMethodName splits service and method from the input. It expects format
-// "/service/method".
-//
-// TODO: move to internal/grpcutil.
-func parseMethodName(methodName string) (service, method string, _ error) {
- if !strings.HasPrefix(methodName, "/") {
- return "", "", errors.New("invalid method name: should start with /")
- }
- methodName = methodName[1:]
-
- pos := strings.LastIndex(methodName, "/")
- if pos < 0 {
- return "", "", errors.New("invalid method name: suffix /method is missing")
- }
- return methodName[:pos], methodName[pos+1:], nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index e4252e5..cd18075 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -30,7 +30,7 @@
"sync/atomic"
"time"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog"
)
const (
@@ -204,9 +204,9 @@
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
if pid == 0 {
- db.get().addChannel(id, cn, true, pid, ref)
+ db.get().addChannel(id, cn, true, pid)
} else {
- db.get().addChannel(id, cn, false, pid, ref)
+ db.get().addChannel(id, cn, false, pid)
}
return id
}
@@ -216,7 +216,7 @@
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
if pid == 0 {
- grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
+ logger.Error("a SubChannel's parent id cannot be 0")
return 0
}
id := idGen.genID()
@@ -228,7 +228,7 @@
pid: pid,
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
- db.get().addSubChannel(id, sc, pid, ref)
+ db.get().addSubChannel(id, sc, pid)
return id
}
@@ -253,12 +253,12 @@
// this listen socket.
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
- grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
+ logger.Error("a ListenSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addListenSocket(id, ls, pid, ref)
+ db.get().addListenSocket(id, ls, pid)
return id
}
@@ -268,16 +268,16 @@
// this normal socket.
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
- grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
+ logger.Error("a NormalSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addNormalSocket(id, ns, pid, ref)
+ db.get().addNormalSocket(id, ns, pid)
return id
}
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes an entry with unique channelz tracking id to be id from
// channelz database.
func RemoveEntry(id int64) {
db.get().removeEntry(id)
@@ -294,17 +294,15 @@
}
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
+func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
for d := desc; d != nil; d = d.Parent {
switch d.Severity {
- case CtUNKNOWN:
- grpclog.InfoDepth(depth+1, d.Desc)
- case CtINFO:
- grpclog.InfoDepth(depth+1, d.Desc)
+ case CtUnknown, CtInfo:
+ l.InfoDepth(depth+1, d.Desc)
case CtWarning:
- grpclog.WarningDepth(depth+1, d.Desc)
+ l.WarningDepth(depth+1, d.Desc)
case CtError:
- grpclog.ErrorDepth(depth+1, d.Desc)
+ l.ErrorDepth(depth+1, d.Desc)
}
}
if getMaxTraceEntry() == 0 {
@@ -335,7 +333,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
c.mu.Lock()
cn.cm = c
cn.trace.cm = c
@@ -348,7 +346,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
c.mu.Lock()
sc.cm = c
sc.trace.cm = c
@@ -357,7 +355,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
c.mu.Lock()
ls.cm = c
c.listenSockets[id] = ls
@@ -365,7 +363,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
c.mu.Lock()
ns.cm = c
c.normalSockets[id] = ns
@@ -632,7 +630,7 @@
if count == 0 {
end = true
}
- var s []*SocketMetric
+ s := make([]*SocketMetric, 0, len(sks))
for _, ns := range sks {
sm := &SocketMetric{}
sm.SocketData = ns.s.ChannelzMetric()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
index 59c7bed..b0013f9 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -21,80 +21,82 @@
import (
"fmt"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog"
)
-// Info logs through grpclog.Info and adds a trace event if channelz is on.
-func Info(id int64, args ...interface{}) {
+var logger = grpclog.Component("channelz")
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
- Severity: CtINFO,
+ Severity: CtInfo,
})
} else {
- grpclog.InfoDepth(1, args...)
+ l.InfoDepth(1, args...)
}
}
-// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
-func Infof(id int64, format string, args ...interface{}) {
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
- Severity: CtINFO,
+ Severity: CtInfo,
})
} else {
- grpclog.InfoDepth(1, msg)
+ l.InfoDepth(1, msg)
}
}
-// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
-func Warning(id int64, args ...interface{}) {
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
} else {
- grpclog.WarningDepth(1, args...)
+ l.WarningDepth(1, args...)
}
}
-// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
-func Warningf(id int64, format string, args ...interface{}) {
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtWarning,
})
} else {
- grpclog.WarningDepth(1, msg)
+ l.WarningDepth(1, msg)
}
}
-// Error logs through grpclog.Error and adds a trace event if channelz is on.
-func Error(id int64, args ...interface{}) {
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
} else {
- grpclog.ErrorDepth(1, args...)
+ l.ErrorDepth(1, args...)
}
}
-// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
-func Errorf(id int64, format string, args ...interface{}) {
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtError,
})
} else {
- grpclog.ErrorDepth(1, msg)
+ l.ErrorDepth(1, msg)
}
}
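
With the signature change above, channelz no longer logs through the global grpclog functions; every caller passes its own grpclog.DepthLoggerV2 (typically the value returned by grpclog.Component), and channelz either records a trace event or logs through that logger at the correct call depth. A sketch of the calling convention using only the public grpclog types (the helper and its arguments are illustrative, not the channelz API):

package main

import "google.golang.org/grpc/grpclog"

var logger = grpclog.Component("core")

// infoOrTrace mimics the shape of the new channelz.Info: the caller's
// component logger is threaded through explicitly instead of the package
// relying on a global logger.
func infoOrTrace(l grpclog.DepthLoggerV2, channelzOn bool, msg string) {
	if channelzOn {
		// The real code records a TraceEventDesc on the channelz entity here.
		return
	}
	l.InfoDepth(1, msg)
}

func main() {
	infoOrTrace(logger, false, "subchannel created")
}
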
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
index 17c2274..3c595d1 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -26,7 +26,6 @@
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
)
// entry represents a node in the channelz database.
@@ -60,17 +59,17 @@
// the addrConn will create a new transport. And when registering the new transport in
// channelz, its parent addrConn could have already been torn down and deleted
// from channelz tracking, and thus reach the code here.
- grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+ logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
}
func (d *dummyEntry) deleteChild(id int64) {
// It is possible for a normal program to reach here under race condition.
// Refer to the example described in addChild().
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
}
func (d *dummyEntry) triggerDelete() {
- grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+ logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
}
func (*dummyEntry) deleteSelfIfReady() {
@@ -215,7 +214,7 @@
case *channel:
c.nestedChans[id] = v.refName
default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
}
}
@@ -326,7 +325,7 @@
if v, ok := e.(*normalSocket); ok {
sc.sockets[id] = v.refName
} else {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
}
}
@@ -493,11 +492,11 @@
}
func (ls *listenSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
}
func (ls *listenSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+ logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
}
func (ls *listenSocket) triggerDelete() {
@@ -506,7 +505,7 @@
}
func (ls *listenSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+ logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
}
func (ls *listenSocket) getParentID() int64 {
@@ -522,11 +521,11 @@
}
func (ns *normalSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
}
func (ns *normalSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+ logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
}
func (ns *normalSocket) triggerDelete() {
@@ -535,7 +534,7 @@
}
func (ns *normalSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+ logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
}
func (ns *normalSocket) getParentID() int64 {
@@ -594,7 +593,7 @@
case *listenSocket:
s.listenSockets[id] = v.refName
default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
}
}
@@ -673,10 +672,10 @@
type Severity int
const (
- // CtUNKNOWN indicates unknown severity of a trace event.
- CtUNKNOWN Severity = iota
- // CtINFO indicates info level severity of a trace event.
- CtINFO
+ // CtUnknown indicates unknown severity of a trace event.
+ CtUnknown Severity = iota
+ // CtInfo indicates info level severity of a trace event.
+ CtInfo
// CtWarning indicates warning level severity of a trace event.
CtWarning
// CtError indicates error level severity of a trace event.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
index 692dd61..1b1c4cc 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
/*
*
* Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
index 79edbef..8b06eed 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
@@ -22,8 +23,6 @@
import (
"sync"
-
- "google.golang.org/grpc/grpclog"
)
var once sync.Once
@@ -39,6 +38,6 @@
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {
once.Do(func() {
- grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
+ logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
index fdf409d..8d194e4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -1,5 +1,3 @@
-// +build linux,!appengine
-
/*
*
* Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
index 8864a08..837ddc4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000..32c9b59
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "context"
+)
+
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
+type requestInfoKey struct{}
+
+// NewRequestInfoContext creates a context with ri.
+func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
+ return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
+// RequestInfoFromContext extracts the RequestInfo from ctx.
+func RequestInfoFromContext(ctx context.Context) interface{} {
+ return ctx.Value(requestInfoKey{})
+}
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
+ return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext creates a context with chi.
+func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
+ return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 0000000..25ade62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, return nil with warning.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+ if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+ return nil
+ }
+ return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, return nil with warning.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+ if cert == nil || cert.URIs == nil {
+ return nil
+ }
+ var spiffeID *url.URL
+ for _, uri := range cert.URIs {
+ if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+ continue
+ }
+ // From this point, we assume the uri is intended for a SPIFFE ID.
+ if len(uri.String()) > 2048 {
+ logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+ return nil
+ }
+ if len(uri.Host) == 0 || len(uri.Path) == 0 {
+ logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+ return nil
+ }
+ if len(uri.Host) > 255 {
+ logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+ return nil
+ }
+ // A valid SPIFFE certificate can only have exactly one URI SAN field.
+ if len(cert.URIs) > 1 {
+ logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+ return nil
+ }
+ spiffeID = uri
+ }
+ return spiffeID
+}
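
SPIFFEIDFromCert above accepts a URI SAN only when it passes a series of checks: spiffe scheme, no opaque part or user info, non-empty trust domain and workload path, the length limits from the SPIFFE specification, and exactly one URI SAN on the certificate. A standalone sketch of those per-URI checks (the function name is illustrative):

package main

import (
	"fmt"
	"net/url"
)

// validSPIFFEID applies the same checks as above to a single parsed URI:
// scheme must be "spiffe", no opaque part or user info, non-empty trust
// domain (host) and workload path, and the 2048-byte / 255-character limits.
func validSPIFFEID(u *url.URL) bool {
	switch {
	case u == nil || u.Scheme != "spiffe" || u.Opaque != "":
		return false
	case u.User != nil && u.User.Username() != "":
		return false
	case len(u.String()) > 2048 || len(u.Host) == 0 || len(u.Host) > 255 || len(u.Path) == 0:
		return false
	}
	return true
}

func main() {
	u, _ := url.Parse("spiffe://example.org/workload/1")
	fmt.Println(validSPIFFEID(u)) // true
}
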
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
new file mode 100644
index 0000000..2919632
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "net"
+ "syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
+// SyscallConn() (the method in interface syscall.Conn) is explicitly
+// implemented on this type.
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
+// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
+// help here).
+type syscallConn struct {
+ net.Conn
+ // sysConn is a type alias of syscall.Conn. It's necessary because the name
+ // `Conn` collides with `net.Conn`.
+ sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+ sysConn, ok := rawConn.(syscall.Conn)
+ if !ok {
+ return newConn
+ }
+ return &syscallConn{
+ Conn: newConn,
+ sysConn: sysConn,
+ }
+}
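
The sysConn alias exists because a struct cannot embed two interfaces that are both named Conn; aliasing syscall.Conn gives the embedded field a distinct name. A standalone sketch of the same wrapping logic, including the fallback when the raw connection does not expose syscall.Conn (names here are illustrative):

package main

import (
	"fmt"
	"net"
	"syscall"
)

// The alias lets the struct embed both net.Conn and syscall.Conn even though
// both interfaces are named "Conn".
type sysConn = syscall.Conn

type wrappedConn struct {
	net.Conn
	sysConn
}

func wrap(rawConn, newConn net.Conn) net.Conn {
	sc, ok := rawConn.(syscall.Conn)
	if !ok {
		// rawConn does not expose a raw fd; fall back to the plain conn.
		return newConn
	}
	return &wrappedConn{Conn: newConn, sysConn: sc}
}

func main() {
	c1, c2 := net.Pipe()
	defer c1.Close()
	defer c2.Close()
	_, ok := wrap(c1, c2).(syscall.Conn)
	fmt.Println(ok) // false: net.Pipe conns don't implement syscall.Conn, so the fallback is taken
}
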
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 0000000..f792fd2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends h2 to next protos.
+func AppendH2ToNextProtos(ps []string) []string {
+ for _, p := range ps {
+ if p == alpnProtoStrH2 {
+ return ps
+ }
+ }
+ ret := make([]string, 0, len(ps)+1)
+ ret = append(ret, ps...)
+ return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+
+ return cfg.Clone()
+}
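
These helpers are used when building TLS transport credentials: the user's tls.Config is cloned rather than mutated, and "h2" is added to the ALPN protocol list only if it is not already present. A standalone sketch combining the two steps (the withH2 name is illustrative):

package main

import (
	"crypto/tls"
	"fmt"
)

// withH2 clones the caller's config (never mutates it) and makes sure "h2"
// is advertised via ALPN exactly once.
func withH2(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		cfg = &tls.Config{}
	}
	c := cfg.Clone()
	for _, p := range c.NextProtos {
		if p == "h2" {
			return c
		}
	}
	c.NextProtos = append(append([]string{}, c.NextProtos...), "h2")
	return c
}

func main() {
	fmt.Println(withH2(&tls.Config{NextProtos: []string{"http/1.1"}}).NextProtos) // [http/1.1 h2]
}
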
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index ae6c897..6f02725 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -26,13 +26,10 @@
const (
prefix = "GRPC_GO_"
- retryStr = prefix + "RETRY"
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
)
var (
- // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
- Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
- TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
+ TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 0000000..9bad03c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+ "os"
+ "strings"
+)
+
+const (
+ // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+ // Do not use this and read from env directly. Its value is read and kept in
+ // variable XDSBootstrapFileName.
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+ // XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+ // content. Do not use this and read from env directly. Its value is read
+ // and kept in variable XDSBootstrapFileContent.
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+
+ ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
+ clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
+ aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+ rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
+ federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+ rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
+
+ c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
+)
+
+var (
+ // XDSBootstrapFileName holds the name of the file which contains xDS
+ // bootstrap configuration. Users can specify the location of the bootstrap
+ // file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+ // XDSBootstrapFileContent holds the content of the xDS bootstrap
+ // configuration. Users can specify the bootstrap config by setting the
+ // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+ // XDSRingHash indicates whether ring hash support is enabled, which can be
+ // disabled by setting the environment variable
+ // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+ XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
+ // XDSClientSideSecurity is used to control processing of security
+ // configuration on the client-side.
+ //
+ // Note that there is no env var protection for the server-side because we
+ // have a brand new API on the server-side and users explicitly need to use
+ // the new API to get security integration on the server.
+ XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
+ // XDSAggregateAndDNS indicates whether processing of aggregated cluster
+ // and DNS cluster is enabled, which can be enabled by setting the
+ // environment variable
+ // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
+ // "true".
+ XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+
+ // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+ // which can be disabled by setting the environment variable
+ // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+ XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
+
+ // XDSFederation indicates whether federation support is enabled.
+ XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
+
+ // XDSRLS indicates whether processing of Cluster Specifier plugins and
+ // support for the RLS Cluster Specifier is enabled, which can be enabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+ // "true".
+ XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
+
+ // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
+)
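
The variables above follow two conventions: features that default to on are disabled only by an explicit "false" (the !strings.EqualFold(..., "false") form), while features that default to off require an explicit "true". A small sketch of the difference, using two of the env var names from this file:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Default-on feature: only an explicit "false" (any case) turns it off.
	os.Setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", "FALSE")
	ringHash := !strings.EqualFold(os.Getenv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"), "false")

	// Default-off feature: anything other than an explicit "true" leaves it off.
	federation := strings.EqualFold(os.Getenv("GRPC_EXPERIMENTAL_XDS_FEDERATION"), "true")

	fmt.Println(ringHash, federation) // false false
}
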
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
index 8c8e19f..30a3b42 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
@@ -19,6 +19,10 @@
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
+import (
+ "os"
+)
+
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
@@ -30,7 +34,7 @@
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
- Logger.Info(args...)
+ Logger.Infoln(args...)
}
}
@@ -39,7 +43,7 @@
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
- Logger.Warning(args...)
+ Logger.Warningln(args...)
}
}
@@ -48,7 +52,7 @@
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
- Logger.Error(args...)
+ Logger.Errorln(args...)
}
}
@@ -57,8 +61,9 @@
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
- Logger.Fatal(args...)
+ Logger.Fatalln(args...)
}
+ os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
@@ -105,14 +110,17 @@
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type DepthLoggerV2 interface {
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
- // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
index f6e0dc1..82af70e 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -18,10 +18,15 @@
package grpclog
+import (
+ "fmt"
+)
+
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
+ logger DepthLoggerV2
prefix string
}
@@ -30,34 +35,47 @@
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
+ pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+ return
}
- Logger.Infof(format, args...)
+ InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
+ pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+ return
}
- Logger.Warningf(format, args...)
+ WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
+ pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+ return
}
- Logger.Errorf(format, args...)
+ ErrorDepth(1, fmt.Sprintf(format, args...))
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
- if Logger.V(2) {
- pl.Infof(format, args...)
+ if !Logger.V(2) {
+ return
}
+ if pl != nil {
+ // Handle nil, so the tests can pass in a nil logger.
+ format = pl.prefix + format
+ pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+ return
+ }
+ InfoDepth(1, fmt.Sprintf(format, args...))
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(prefix string) *PrefixLogger {
- return &PrefixLogger{prefix: prefix}
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+ return &PrefixLogger{logger: logger, prefix: prefix}
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
index 200b115..740f83c 100644
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -31,26 +31,37 @@
mu sync.Mutex
)
+// Int implements rand.Int on the grpcrand global source.
+func Int() int {
+ mu.Lock()
+ defer mu.Unlock()
+ return r.Int()
+}
+
// Int63n implements rand.Int63n on the grpcrand global source.
func Int63n(n int64) int64 {
mu.Lock()
- res := r.Int63n(n)
- mu.Unlock()
- return res
+ defer mu.Unlock()
+ return r.Int63n(n)
}
// Intn implements rand.Intn on the grpcrand global source.
func Intn(n int) int {
mu.Lock()
- res := r.Intn(n)
- mu.Unlock()
- return res
+ defer mu.Unlock()
+ return r.Intn(n)
}
// Float64 implements rand.Float64 on the grpcrand global source.
func Float64() float64 {
mu.Lock()
- res := r.Float64()
- mu.Unlock()
- return res
+ defer mu.Unlock()
+ return r.Float64()
+}
+
+// Uint64 implements rand.Uint64 on the grpcrand global source.
+func Uint64() uint64 {
+ mu.Lock()
+ defer mu.Unlock()
+ return r.Uint64()
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 0000000..b25b0ba
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "strconv"
+ "time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but has less chance to overflow.
+func div(d, r time.Duration) int64 {
+ if d%r > 0 {
+ return int64(d/r + 1)
+ }
+ return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+ // TODO: This is simplistic and not bandwidth efficient. Improve it.
+ if t <= 0 {
+ return "0n"
+ }
+ if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "n"
+ }
+ if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "u"
+ }
+ if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "m"
+ }
+ if d := div(t, time.Second); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "S"
+ }
+ if d := div(t, time.Minute); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "M"
+ }
+ // Note that maxTimeoutValue * time.Hour > MaxInt64.
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
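
EncodeDuration picks the coarsest unit whose count still fits in the 8-digit grpc-timeout field, and div rounds the count up so the encoded timeout is never shorter than the requested one. A standalone check of that rounding rule:

package main

import (
	"fmt"
	"time"
)

// div mirrors the helper above: d/r rounded up, written this way so that
// adding r-1 to d cannot overflow.
func div(d, r time.Duration) int64 {
	if d%r > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

func main() {
	fmt.Println(div(1500*time.Millisecond, time.Second)) // 2: rounds up, never truncates the deadline
	fmt.Println(div(2*time.Second, time.Second))         // 2: exact multiples are unchanged
}
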
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
similarity index 67%
rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
index 8783a8c..e2f948e 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -1,8 +1,6 @@
-// +build go1.13
-
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,16 +16,5 @@
*
*/
-package dns
-
-import "net"
-
-func init() {
- filterError = func(err error) error {
- if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
- // The name does not exist; not an error.
- return nil
- }
- return err
- }
-}
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 0000000..6f22bd8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+ return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists. The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+ md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+ return
+}
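
WithExtraMetadata and ExtraMetadata use the standard context-key idiom: an unexported empty-struct key type keeps the stored value retrievable only through this package's accessors. A standalone sketch of that idiom with gRPC metadata (the key and helper names are illustrative):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// An unexported empty-struct key: zero size, and no other package can collide
// with it or read the value without going through these helpers.
type mdKey struct{}

func withMD(ctx context.Context, md metadata.MD) context.Context {
	return context.WithValue(ctx, mdKey{}, md)
}

func mdFrom(ctx context.Context) (metadata.MD, bool) {
	md, ok := ctx.Value(mdKey{}).(metadata.MD)
	return md, ok
}

func main() {
	ctx := withMD(context.Background(), metadata.Pairs("k", "v"))
	md, ok := mdFrom(ctx)
	fmt.Println(ok, md["k"]) // true [v]
}
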
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 0000000..4e74750
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "errors"
+ "strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+//
+func ParseMethod(methodName string) (service, method string, _ error) {
+ if !strings.HasPrefix(methodName, "/") {
+ return "", "", errors.New("invalid method name: should start with /")
+ }
+ methodName = methodName[1:]
+
+ pos := strings.LastIndex(methodName, "/")
+ if pos < 0 {
+ return "", "", errors.New("invalid method name: suffix /method is missing")
+ }
+ return methodName[:pos], methodName[pos+1:], nil
+}
+
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+ if contentType == baseContentType {
+ return "", true
+ }
+ if !strings.HasPrefix(contentType, baseContentType) {
+ return "", false
+ }
+ // guaranteed since != baseContentType and has baseContentType prefix
+ switch contentType[len(baseContentType)] {
+ case '+', ';':
+ // this will return true for "application/grpc+" or "application/grpc;"
+ // which the previous validContentType function tested to be valid, so we
+ // just say that no content-subtype is specified in this case
+ return contentType[len(baseContentType)+1:], true
+ default:
+ return "", false
+ }
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase
+func ContentType(contentSubtype string) string {
+ if contentSubtype == "" {
+ return baseContentType
+ }
+ return baseContentType + "+" + contentSubtype
+}
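
ContentSubtype above implements the rule from the gRPC HTTP/2 protocol doc: the content-subtype is whatever follows "application/grpc" after a '+' or ';', and anything else is rejected. A standalone sketch of that rule (contentSubtype here is a local copy for illustration):

package main

import (
	"fmt"
	"strings"
)

// contentSubtype returns the subtype and whether the content-type is valid
// for gRPC at all.
func contentSubtype(ct string) (string, bool) {
	const base = "application/grpc"
	if ct == base {
		return "", true
	}
	if !strings.HasPrefix(ct, base) {
		return "", false
	}
	switch ct[len(base)] {
	case '+', ';':
		return ct[len(base)+1:], true
	default:
		return "", false
	}
}

func main() {
	fmt.Println(contentSubtype("application/grpc+proto")) // proto true
	fmt.Println(contentSubtype("application/json"))       // (empty) false
}
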
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
similarity index 63%
copy from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
copy to vendor/google.golang.org/grpc/internal/grpcutil/regex.go
index 8783a8c..7a092b2 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -1,8 +1,6 @@
-// +build go1.13
-
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,16 +16,16 @@
*
*/
-package dns
+package grpcutil
-import "net"
+import "regexp"
-func init() {
- filterError = func(err error) error {
- if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
- // The name does not exist; not an error.
- return nil
- }
- return err
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+ if len(text) == 0 {
+ return re.MatchString(text)
}
+ re.Longest()
+ rem := re.FindString(text)
+ return len(rem) == len(text)
}
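
FullMatchWithRegex exists because regexp.MatchString accepts a match anywhere in the input; switching the engine to leftmost-longest and comparing the match length against the input length only accepts matches that cover the whole string. A standalone sketch showing the difference (fullMatch is a local copy for illustration):

package main

import (
	"fmt"
	"regexp"
)

// fullMatch mirrors the helper above: the longest leftmost match must cover
// the entire input to count as a match.
func fullMatch(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest()
	return len(re.FindString(text)) == len(text)
}

func main() {
	re := regexp.MustCompile("ab+")
	fmt.Println(re.MatchString("xabby"), fullMatch(re, "xabby")) // true false
}
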
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go
deleted file mode 100644
index 80b33cd..0000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcutil provides a bunch of utility functions to be used across the
-// gRPC codebase.
-package grpcutil
-
-import (
- "strings"
-
- "google.golang.org/grpc/resolver"
-)
-
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", "", false) instead.
-func split2(s, sep string) (string, string, bool) {
- spl := strings.SplitN(s, sep, 2)
- if len(spl) < 2 {
- return "", "", false
- }
- return spl[0], spl[1], true
-}
-
-// ParseTarget splits target into a resolver.Target struct containing scheme,
-// authority and endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func ParseTarget(target string) (ret resolver.Target) {
- var ok bool
- ret.Scheme, ret.Endpoint, ok = split2(target, "://")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- return ret
-}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c6fbe8b..1b596bf 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -25,6 +25,7 @@
"time"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/serviceconfig"
)
var (
@@ -37,12 +38,32 @@
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
- // NewRequestInfoContext creates a new context based on the argument context attaching
- // the passed in RequestInfo to the new context.
- NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
// ParseServiceConfigForTesting is for creating a fake
// ClientConn for resolver testing only
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+ // EqualServiceConfigForTesting is for testing service config generation and
+ // parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+ // This function compares the config without rawJSON stripped, in case
+ // there's a difference in white space.
+ EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+ // GetCertificateProviderBuilder returns the registered builder for the
+ // given name. This is set by package certprovider for use from xDS
+ // bootstrap code while parsing certificate provider configs in the
+ // bootstrap file.
+ GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
+ // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+ // stored in the passed in attributes. This is set by
+ // credentials/xds/xds.go.
+ GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
+ // GetServerCredentials returns the transport credentials configured on a
+ // gRPC server. An xDS-enabled server needs to know what type of credentials
+ // is configured on the underlying gRPC server. This is set by server.go.
+ GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
+ // DrainServerTransports initiates a graceful close of existing connections
+ // on a gRPC server accepted on the provided listener address. An
+ // xDS-enabled server invokes this method on a grpc.Server when a particular
+ // listener moves to "not-serving" mode.
+ DrainServerTransports interface{} // func(*grpc.Server, string)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 0000000..b8733db
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o interface{}) bool {
+ om, ok := o.(mdValue)
+ if !ok {
+ return false
+ }
+ if len(m) != len(om) {
+ return false
+ }
+ for k, v := range m {
+ ov := om[k]
+ if len(ov) != len(v) {
+ return false
+ }
+ for i, ve := range v {
+ if ov[i] != ve {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+ attrs := addr.Attributes
+ if attrs == nil {
+ return nil
+ }
+ md, _ := attrs.Value(mdKey).(mdValue)
+ return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+ return addr
+}
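
As a usage sketch (a hypothetical test in this package), the round trip through Set and Get looks like this; the address and metadata values are made up for illustration:

package metadata

import (
	"testing"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func TestSetAndGet(t *testing.T) {
	addr := resolver.Address{Addr: "localhost:50051"}
	// Attach metadata to the address; a SubConn created with this address
	// would send it with every RPC.
	addr = Set(addr, metadata.Pairs("x-region", "us-east1"))
	md := Get(addr)
	if vals := md.Get("x-region"); len(vals) != 1 || vals[0] != "us-east1" {
		t.Errorf("Get(addr)[x-region] = %v; want [us-east1]", vals)
	}
}
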
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 0000000..c7a18a9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+ "context"
+ "sync"
+
+ "google.golang.org/grpc/internal/serviceconfig"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+ // Selects the configuration for the RPC, or terminates it using the error.
+ // This error will be converted by the gRPC library to a status error with
+ // code UNKNOWN if it is not returned as a status error.
+ SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+ // Context is the user's context for the RPC and contains headers and
+ // application timeout. It is passed for interception purposes and for
+ // efficiency reasons. SelectConfig should not be blocking.
+ Context context.Context
+ Method string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+ // The context to use for the remainder of the RPC; can pass info to LB
+ // policy or affect timeout or metadata.
+ Context context.Context
+ MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+ OnCommitted func() // Called when the RPC has been committed (retries no longer possible)
+ Interceptor ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but defined here for circular
+// dependency reasons.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server, if there is any.
+ // It must only be called after stream.CloseAndRecv has returned, or
+ // stream.Recv has returned a non-nil error (including io.EOF).
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when non-nil error is met. It is also not safe to call CloseSend
+ // concurrently with SendMsg.
+ CloseSend() error
+ // Context returns the context for this stream.
+ //
+ // It should not be called until after Header or RecvMsg has returned. Once
+ // called, subsequent client-side retries are disabled.
+ Context() context.Context
+ // SendMsg is generally called by generated code. On error, SendMsg aborts
+ // the stream. If the error was generated by the client, the status is
+ // returned directly; otherwise, io.EOF is returned and the status of
+ // the stream may be discovered using RecvMsg.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the server. An
+ // untimely stream closure may result in lost messages. To ensure delivery,
+ // users should ensure the RPC completed successfully using RecvMsg.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines. It is also
+ // not safe to call CloseSend concurrently with SendMsg.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the stream completes successfully. On
+ // any other error, the stream is aborted and the error contains the RPC
+ // status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+ // NewStream produces a ClientStream for an RPC which may optionally use
+ // the provided function to produce a stream for delegation. Note:
+ // RPCInfo.Context should not be used (will be nil).
+ //
+ // done is invoked when the RPC is finished using its connection, or could
+ // not be assigned a connection. RPC operations may still occur on
+ // ClientStream after done is called, since the interceptor is invoked by
+ // application-layer operations. done must never be nil when called.
+ NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+ // AllowRPC checks if an incoming RPC is allowed to proceed based on
+ // information about the connection the RPC was received on, and its HTTP
+ // headers. This information will be piped into the context.
+ AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+ state.Attributes = state.Attributes.WithValue(csKey, cs)
+ return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+ cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+ return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+ mu sync.RWMutex
+ cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+ scs.mu.Lock()
+ defer scs.mu.Unlock()
+ scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+ scs.mu.RLock()
+ defer scs.mu.RUnlock()
+ return scs.cs.SelectConfig(r)
+}
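
To illustrate the intended wiring (the selector below is hypothetical, not part of this change): a ConfigSelector returns an RPCConfig per RPC, and SafeConfigSelector lets a resolver swap selectors without racing in-flight selections.

package resolver

import (
	"context"
	"testing"
)

// staticSelector is a hypothetical ConfigSelector that applies no method
// config and keeps the RPC's own context.
type staticSelector struct{}

func (staticSelector) SelectConfig(ri RPCInfo) (*RPCConfig, error) {
	return &RPCConfig{Context: ri.Context}, nil
}

func TestSafeConfigSelector(t *testing.T) {
	var scs SafeConfigSelector
	scs.UpdateConfigSelector(staticSelector{})
	cfg, err := scs.SelectConfig(RPCInfo{
		Context: context.Background(),
		Method:  "/pkg.Service/Method",
	})
	if err != nil || cfg == nil {
		t.Fatalf("SelectConfig() = %v, %v; want a non-nil config and nil error", cfg, err)
	}
}
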
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index c368db6..75301c5 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -32,7 +32,9 @@
"sync"
"time"
+ grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/resolver"
@@ -43,6 +45,15 @@
// addresses from SRV records. Must not be changed after init time.
var EnableSRVLookups = false
+var logger = grpclog.Component("dns")
+
+// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
+// single variable for testing the resolver?
+var (
+ newTimer = time.NewTimer
+ newTimerDNSResRate = time.NewTimer
+)
+
func init() {
resolver.Register(NewBuilder())
}
@@ -140,7 +151,6 @@
d.wg.Add(1)
go d.watcher()
- d.ResolveNow(resolver.ResolveNowOptions{})
return d, nil
}
@@ -198,28 +208,38 @@
func (d *dnsResolver) watcher() {
defer d.wg.Done()
+ backoffIndex := 1
for {
- select {
- case <-d.ctx.Done():
- return
- case <-d.rn:
- }
-
state, err := d.lookup()
if err != nil {
+ // Report error to the underlying grpc.ClientConn.
d.cc.ReportError(err)
} else {
- d.cc.UpdateState(*state)
+ err = d.cc.UpdateState(*state)
}
- // Sleep to prevent excessive re-resolutions. Incoming resolution requests
- // will be queued in d.rn.
- t := time.NewTimer(minDNSResRate)
+ var timer *time.Timer
+ if err == nil {
+ // Resolution succeeded; wait for the next ResolveNow. However, also wait at
+ // least 30 seconds to prevent constantly re-resolving.
+ backoffIndex = 1
+ timer = newTimerDNSResRate(minDNSResRate)
+ select {
+ case <-d.ctx.Done():
+ timer.Stop()
+ return
+ case <-d.rn:
+ }
+ } else {
+ // Poll on an error found in DNS Resolver or an error received from ClientConn.
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+ backoffIndex++
+ }
select {
- case <-t.C:
case <-d.ctx.Done():
- t.Stop()
+ timer.Stop()
return
+ case <-timer.C:
}
}
}
@@ -251,27 +271,22 @@
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
}
addr := ip + ":" + strconv.Itoa(int(s.Port))
- newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
}
}
return newAddrs, nil
}
-var filterError = func(err error) error {
+func handleDNSError(err error, lookupType string) error {
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
// Timeouts and temporary errors should be communicated to gRPC to
// attempt another DNS query (with backoff). Other errors should be
// suppressed (they may represent the absence of a TXT record).
return nil
}
- return err
-}
-
-func handleDNSError(err error, lookupType string) error {
- err = filterError(err)
if err != nil {
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
- grpclog.Infoln(err)
+ logger.Info(err)
}
return err
}
@@ -294,7 +309,7 @@
// TXT record must have "grpc_config=" attribute in order to be used as service config.
if !strings.HasPrefix(res, txtAttribute) {
- grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+ logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
// This is not an error; it is the equivalent of not having a service config.
return nil
}
@@ -303,12 +318,12 @@
}
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
- var newAddrs []resolver.Address
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
if err != nil {
err = handleDNSError(err, "A")
return nil, err
}
+ newAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
ip, ok := formatIP(a)
if !ok {
@@ -326,13 +341,15 @@
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
return nil, hostErr
}
- state := &resolver.State{
- Addresses: append(addrs, srv...),
+
+ state := resolver.State{Addresses: addrs}
+ if len(srv) > 0 {
+ state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
}
if !d.disableServiceConfig {
state.ServiceConfig = d.lookupTXT()
}
- return state, nil
+ return &state, nil
}
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@@ -418,12 +435,12 @@
var rcs []rawChoice
err := json.Unmarshal([]byte(js), &rcs)
if err != nil {
- grpclog.Warningf("dns: error parsing service config json: %v", err)
+ logger.Warningf("dns: error parsing service config json: %v", err)
return ""
}
cliHostname, err := os.Hostname()
if err != nil {
- grpclog.Warningf("dns: error getting client hostname: %v", err)
+ logger.Warningf("dns: error getting client hostname: %v", err)
return ""
}
var sc string
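
The two timer constructors introduced above exist purely so package tests can make the watcher's waits fire immediately; a hypothetical sketch of how a test in this package might stub them:

package dns

import (
	"testing"
	"time"
)

func TestWatcherWithStubbedTimers(t *testing.T) {
	oldTimer, oldDNSResRate := newTimer, newTimerDNSResRate
	defer func() { newTimer, newTimerDNSResRate = oldTimer, oldDNSResRate }()
	// Make both the error-backoff timer and the min-re-resolution-rate timer
	// fire (almost) immediately so the watcher loop can be exercised quickly.
	newTimer = func(time.Duration) *time.Timer { return time.NewTimer(0) }
	newTimerDNSResRate = func(time.Duration) *time.Timer { return time.NewTimer(0) }
	// ... build a dnsResolver via NewBuilder() and assert on its behavior ...
}
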
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 0000000..20852e5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/internal/transport/networktype"
+ "google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+ scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+ if target.Authority != "" {
+ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
+ }
+
+ // gRPC was parsing the dial target manually before PR #4817, and we
+ // switched to using url.Parse() in that PR. To avoid breaking existing
+ // resolver implementations we ended up stripping the leading "/" from the
+ // endpoint. This obviously does not work for the "unix" scheme. Hence we
+ // end up using the parsed URL instead.
+ endpoint := target.URL.Path
+ if endpoint == "" {
+ endpoint = target.URL.Opaque
+ }
+ addr := resolver.Address{Addr: endpoint}
+ if b.scheme == unixAbstractScheme {
+ // prepend "\x00" to address for unix-abstract
+ addr.Addr = "\x00" + addr.Addr
+ }
+ cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+ return &nopResolver{}, nil
+}
+
+func (b *builder) Scheme() string {
+ return b.scheme
+}
+
+type nopResolver struct {
+}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+ resolver.Register(&builder{scheme: unixScheme})
+ resolver.Register(&builder{scheme: unixAbstractScheme})
+}
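
On the client side these schemes are used via the dial target; a minimal, hypothetical usage sketch (socket path made up, plaintext credentials for brevity):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "unix:///absolute/path" (or "unix:relative/path") goes through the unix
	// resolver above; "unix-abstract:name" targets an abstract socket and the
	// resolver prepends the leading "\x00" byte.
	conn, err := grpc.Dial("unix:///tmp/app.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}
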
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000..badbdbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+ Name string
+ Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+ if bc.Config == nil {
+ // If config is nil, return empty config `{}`.
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+ }
+ c, err := json.Marshal(bc.Config)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+// - If the config for the first supported policy is invalid, the whole service
+// config is invalid.
+// - If the list doesn't contain any supported policy, the whole service config
+// is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+ var ir intermediateBalancerConfig
+ err := json.Unmarshal(b, &ir)
+ if err != nil {
+ return err
+ }
+
+ var names []string
+ for i, lbcfg := range ir {
+ if len(lbcfg) != 1 {
+ return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+ }
+
+ var (
+ name string
+ jsonCfg json.RawMessage
+ )
+ // Get the key:value pair from the map. We have already made sure that
+ // the map contains a single entry.
+ for name, jsonCfg = range lbcfg {
+ }
+
+ names = append(names, name)
+ builder := balancer.Get(name)
+ if builder == nil {
+ // If the balancer is not registered, move on to the next config.
+ // This is not an error.
+ continue
+ }
+ bc.Name = name
+
+ parser, ok := builder.(balancer.ConfigParser)
+ if !ok {
+ if string(jsonCfg) != "{}" {
+ logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+ }
+ // Stop here even though the builder doesn't support parsing config.
+ return nil
+ }
+
+ cfg, err := parser.ParseConfig(jsonCfg)
+ if err != nil {
+ return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+ }
+ bc.Config = cfg
+ return nil
+ }
+ // This is reached when the for loop iterates over all entries, but didn't
+ // return. This means we had a loadBalancingConfig slice but did not
+ // encounter a registered policy. The config is considered invalid in this
+ // case.
+ return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady *bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout *time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized
+ // payload after per-message compression (but before stream compression) in bytes.
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ MaxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoff). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+ //
+ // These fields are required and must be greater than zero.
+ InitialBackoff time.Duration
+ MaxBackoff time.Duration
+ BackoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ //
+ // Status codes are specified as strings, e.g., "UNAVAILABLE".
+ //
+ // This field is required and must be non-empty.
+ // Note: a set is used to store this for easy lookup.
+ RetryableStatusCodes map[codes.Code]bool
+}
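
A sketch of the selection behavior of UnmarshalJSON, as a hypothetical test in this package (the round_robin balancer is imported only to register it):

package serviceconfig

import (
	"encoding/json"
	"testing"

	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
)

func TestBalancerConfigUnmarshalJSON(t *testing.T) {
	// The first entry names an unregistered policy and is skipped; the first
	// registered policy ("round_robin") is selected.
	in := `[{"not_a_registered_policy": {}}, {"round_robin": {}}]`
	var bc BalancerConfig
	if err := json.Unmarshal([]byte(in), &bc); err != nil {
		t.Fatalf("Unmarshal(%s) failed: %v", in, err)
	}
	if bc.Name != "round_robin" {
		t.Errorf("bc.Name = %q; want %q", bc.Name, "round_robin")
	}
}
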
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 6812606..e5c6513 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -97,7 +97,7 @@
if s.Code() == codes.OK {
return nil
}
- return (*Error)(s.Proto())
+ return &Error{s: s}
}
// WithDetails returns a new status with the provided details messages appended to the status.
@@ -136,26 +136,31 @@
return details
}
-// Error is an alias of a status proto. It implements error and Status,
-// and a nil Error should never be returned by this package.
-type Error spb.Status
+func (s *Status) String() string {
+ return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
-func (se *Error) Error() string {
- p := (*spb.Status)(se)
- return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
+// Error wraps a pointer of a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+ s *Status
+}
+
+func (e *Error) Error() string {
+ return e.s.String()
}
// GRPCStatus returns the Status represented by se.
-func (se *Error) GRPCStatus() *Status {
- return FromProto((*spb.Status)(se))
+func (e *Error) GRPCStatus() *Status {
+ return e.s
}
// Is implements future error.Is functionality.
// A Error is equivalent if the code and message are identical.
-func (se *Error) Is(target error) bool {
+func (e *Error) Is(target error) bool {
tse, ok := target.(*Error)
if !ok {
return false
}
- return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
+ return proto.Equal(e.s.s, tse.s.s)
}
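
The effect of the new Error type is visible through the public status package: two status errors with the same code and message compare as equivalent under errors.Is. A small hypothetical sketch:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err1 := status.Error(codes.NotFound, "user not found")
	err2 := status.Error(codes.NotFound, "user not found")
	// Error.Is compares the underlying status protos, so identical code and
	// message make these equivalent even though they are distinct values.
	fmt.Println(errors.Is(err1, err2)) // true
}
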
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 43281a3..b3a7227 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
/*
*
* Copyright 2018 gRPC authors.
@@ -32,35 +30,35 @@
"google.golang.org/grpc/grpclog"
)
+var logger = grpclog.Component("core")
+
// GetCPUTime returns how much CPU time has passed since the start of this process.
func GetCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
- grpclog.Fatal(err)
+ logger.Fatal(err)
}
return ts.Nano()
}
-// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
-type Rusage syscall.Rusage
+// Rusage is an alias for syscall.Rusage under linux environment.
+type Rusage = syscall.Rusage
// GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
- rusage = new(Rusage)
- syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
- return
+func GetRusage() *Rusage {
+ rusage := new(Rusage)
+ syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+ return rusage
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
- f := (*syscall.Rusage)(first)
- l := (*syscall.Rusage)(latest)
var (
- utimeDiffs = l.Utime.Sec - f.Utime.Sec
- utimeDiffus = l.Utime.Usec - f.Utime.Usec
- stimeDiffs = l.Stime.Sec - f.Stime.Sec
- stimeDiffus = l.Stime.Usec - f.Stime.Usec
+ utimeDiffs = latest.Utime.Sec - first.Utime.Sec
+ utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+ stimeDiffs = latest.Stime.Sec - first.Stime.Sec
+ stimeDiffus = latest.Stime.Usec - first.Stime.Usec
)
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index d3fd9da..999f52c 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
@@ -18,6 +19,8 @@
*
*/
+// Package syscall provides functionalities that grpc uses to get low-level
+// operating system stats/info.
package syscall
import (
@@ -29,44 +32,45 @@
)
var once sync.Once
+var logger = grpclog.Component("core")
func log() {
once.Do(func() {
- grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+ logger.Info("CPU time info is unavailable on non-linux environments.")
})
}
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
func GetCPUTime() int64 {
log()
return 0
}
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
type Rusage struct{}
-// GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
log()
return nil
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It a no-op function for non-linux or appengine environment.
+// between two Rusage structs. It is a no-op function for non-linux environments.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
log()
return 0, 0
}
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
+// SetTCPUserTimeout is a no-op function under non-linux environments.
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
log()
return nil
}
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
func GetTCPUserTimeout(conn net.Conn) (int, error) {
log()
return -1, nil
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index ddee20b..8394d25 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -20,13 +20,17 @@
import (
"bytes"
+ "errors"
"fmt"
"runtime"
+ "strconv"
"sync"
"sync/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/status"
)
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
@@ -128,6 +132,15 @@
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+type earlyAbortStream struct {
+ httpStatus uint32
+ streamID uint32
+ contentSubtype string
+ status *status.Status
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
type dataFrame struct {
streamID uint32
endStream bool
@@ -284,7 +297,7 @@
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // *chan struct{}
+ trfChan atomic.Value // chan struct{}
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -298,10 +311,10 @@
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
// controlbuf.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(*chan struct{})
+ ch, _ := c.trfChan.Load().(chan struct{})
if ch != nil {
select {
- case <-*ch:
+ case <-ch:
case <-c.done:
}
}
@@ -335,8 +348,7 @@
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- ch := make(chan struct{})
- c.trfChan.Store(&ch)
+ c.trfChan.Store(make(chan struct{}))
}
}
c.mu.Unlock()
@@ -377,9 +389,9 @@
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are removing the frame that put us over the
// threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(*chan struct{})
- close(*ch)
- c.trfChan.Store((*chan struct{})(nil))
+ ch := c.trfChan.Load().(chan struct{})
+ close(ch)
+ c.trfChan.Store((chan struct{})(nil))
}
c.transportResponseFrames--
}
@@ -395,7 +407,6 @@
select {
case <-c.ch:
case <-c.done:
- c.finish()
return nil, ErrConnClosing
}
}
@@ -420,6 +431,14 @@
hdr.onOrphaned(ErrConnClosing)
}
}
+ // In case throttle() is currently in flight, it needs to be unblocked.
+ // Otherwise, the transport may not close, since the transport is closed by
+ // the reader encountering the connection error.
+ ch, _ := c.trfChan.Load().(chan struct{})
+ if ch != nil {
+ close(ch)
+ }
+ c.trfChan.Store((chan struct{})(nil))
c.mu.Unlock()
}
@@ -505,7 +524,9 @@
// 1. When the connection is closed by some other known issue.
// 2. User closed the connection.
// 3. A graceful close of connection.
- infof("transport: loopyWriter.run returning. %v", err)
+ if logger.V(logLevel) {
+ logger.Infof("transport: loopyWriter.run returning. %v", err)
+ }
err = nil
}
}()
@@ -605,7 +626,9 @@
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
- warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+ }
return nil
}
// Case 1.A: Server is responding back with headers.
@@ -658,7 +681,9 @@
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
- warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+ }
}
}
var (
@@ -743,6 +768,27 @@
return nil
}
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+ if l.side == clientSide {
+ return errors.New("earlyAbortStream not handled on client")
+ }
+ // In case the caller forgets to set the http status, default to 200.
+ if eas.httpStatus == 0 {
+ eas.httpStatus = 200
+ }
+ headerFields := []hpack.HeaderField{
+ {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+ {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+ {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+ {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+ }
+
+ if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
if l.side == clientSide {
l.draining = true
@@ -781,6 +827,8 @@
return l.registerStreamHandler(i)
case *cleanupStream:
return l.cleanupStreamHandler(i)
+ case *earlyAbortStream:
+ return l.earlyAbortStreamHandler(i)
case *incomingGoAway:
return l.incomingGoAwayHandler(i)
case *dataFrame:
@@ -857,38 +905,45 @@
return false, nil
}
var (
- idx int
buf []byte
)
- if len(dataItem.h) != 0 { // data header has not been written out yet.
- buf = dataItem.h
- } else {
- idx = 1
- buf = dataItem.d
- }
- size := http2MaxFrameLen
- if len(buf) < size {
- size = len(buf)
- }
+ // Figure out the maximum size we can send
+ maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
str.state = waitingOnStreamQuota
return false, nil
- } else if strQuota < size {
- size = strQuota
+ } else if maxSize > strQuota {
+ maxSize = strQuota
+ }
+ if maxSize > int(l.sendQuota) { // connection-level flow control.
+ maxSize = int(l.sendQuota)
+ }
+ // Compute how much of the header and data we can send within quota and max frame length
+ hSize := min(maxSize, len(dataItem.h))
+ dSize := min(maxSize-hSize, len(dataItem.d))
+ if hSize != 0 {
+ if dSize == 0 {
+ buf = dataItem.h
+ } else {
+ // We can add some data to grpc message header to distribute bytes more equally across frames.
+ // Copy on the stack to avoid generating garbage
+ var localBuf [http2MaxFrameLen]byte
+ copy(localBuf[:hSize], dataItem.h)
+ copy(localBuf[hSize:], dataItem.d[:dSize])
+ buf = localBuf[:hSize+dSize]
+ }
+ } else {
+ buf = dataItem.d
}
- if l.sendQuota < uint32(size) { // connection-level flow control.
- size = int(l.sendQuota)
- }
+ size := hSize + dSize
+
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && size == len(buf) {
- // buf contains either data or it contains header but data is empty.
- if idx == 1 || len(dataItem.d) == 0 {
- endStream = true
- }
+ if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
@@ -896,14 +951,10 @@
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
return false, err
}
- buf = buf[size:]
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
- if idx == 0 {
- dataItem.h = buf
- } else {
- dataItem.d = buf
- }
+ dataItem.h = dataItem.h[hSize:]
+ dataItem.d = dataItem.d[dSize:]
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
str.itl.dequeue()
@@ -924,3 +975,10 @@
}
return false, nil
}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
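
The reworked processData splits a queued message across frames using three limits: the HTTP/2 max frame size, the stream-level quota, and the connection-level send quota. A standalone sketch of that arithmetic (http2MaxFrameLen assumed to be 16384, as in this package; the quotas are made-up numbers):

package main

import "fmt"

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// split mirrors the hSize/dSize computation above: fit as much of the gRPC
// message header as possible, then fill the rest of the frame with payload.
func split(hLen, dLen, streamQuota, connQuota int) (hSize, dSize int) {
	const http2MaxFrameLen = 16384
	maxSize := min(min(http2MaxFrameLen, streamQuota), connQuota)
	hSize = min(maxSize, hLen)
	dSize = min(maxSize-hSize, dLen)
	return hSize, dSize
}

func main() {
	// A 5-byte gRPC message header plus 20000 payload bytes with ample quota:
	// the first DATA frame carries the header and 16379 payload bytes.
	fmt.Println(split(5, 20000, 1<<20, 1<<20)) // 5 16379
}
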
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index f262edd..97198c5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -136,12 +136,10 @@
// newLimit updates the inflow window to a new value n.
// It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
f.mu.Lock()
- d := n - f.limit
f.limit = n
f.mu.Unlock()
- return d
}
func (f *inFlow) maybeAdjust(n uint32) uint32 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index fc44e97..1c3459c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -39,6 +39,7 @@
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -57,7 +58,7 @@
}
contentType := r.Header.Get("Content-Type")
// TODO: do we assume contentType is lowercase? we did before
- contentSubtype, validContentType := contentSubtype(contentType)
+ contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
if !validContentType {
return nil, errors.New("invalid gRPC request content-type")
}
@@ -140,9 +141,8 @@
stats stats.Handler
}
-func (ht *serverHandlerTransport) Close() error {
+func (ht *serverHandlerTransport) Close() {
ht.closeOnce.Do(ht.closeCloseChanOnce)
- return nil
}
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 1cc586f..f0c72d3 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -24,6 +24,8 @@
"io"
"math"
"net"
+ "net/http"
+ "path/filepath"
"strconv"
"strings"
"sync"
@@ -32,15 +34,18 @@
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
+ icredentials "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/grpcutil"
+ imetadata "google.golang.org/grpc/internal/metadata"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
@@ -57,7 +62,7 @@
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
- md interface{}
+ md metadata.MD
conn net.Conn // underlying communication channel
loopy *loopyWriter
remoteAddr net.Addr
@@ -112,6 +117,9 @@
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
+ // goAwayDebugMessage contains a detailed human readable string about a
+ // GoAway frame, useful for error messages.
+ goAwayDebugMessage string
// A condition variable used to signal when the keepalive goroutine should
// go dormant. The condition for dormancy is based on the number of active
// streams and the `PermitWithoutStream` keepalive client parameter. And
@@ -135,11 +143,34 @@
connectionID uint64
}
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+ address := addr.Addr
+ networkType, ok := networktype.Get(addr)
if fn != nil {
- return fn(ctx, addr)
+ // Special handling for unix scheme with custom dialer. Back in the day,
+ // we did not have a unix resolver and therefore targets with a unix
+ // scheme would end up using the passthrough resolver. So, users used a
+ // custom dialer in this case and expected the original dial target to
+ // be passed to the custom dialer. Now, we have a unix resolver. But if
+ // a custom dialer is specified, we want to retain the old behavior in
+ // terms of the address being passed to the custom dialer.
+ if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+ // Supported unix targets are either "unix://absolute-path" or
+ // "unix:relative-path".
+ if filepath.IsAbs(address) {
+ return fn(ctx, "unix://"+address)
+ }
+ return fn(ctx, "unix:"+address)
+ }
+ return fn(ctx, address)
}
- return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+ if !ok {
+ networkType, address = parseDialTarget(address)
+ }
+ if networkType == "tcp" && useProxy {
+ return proxyDial(ctx, address, grpcUA)
+ }
+ return (&net.Dialer{}).DialContext(ctx, networkType, address)
}
func isTemporary(err error) bool {
@@ -161,7 +192,7 @@
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@@ -170,7 +201,13 @@
}
}()
- conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
+ // gRPC, resolver, balancer etc. can specify arbitrary data in the
+ // Attributes field of resolver.Address, which is shoved into connectCtx
+ // and passed to the dialer and credential handshaker. This makes it possible for
+ // address-specific arbitrary data to reach custom dialers and credential handshakers.
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+ conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
@@ -214,12 +251,34 @@
}
}
if transportCreds != nil {
- scheme = "https"
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+ rawConn := conn
+ // Pull the deadline from the connectCtx, which will be used for
+ // timeouts in the authentication protocol handshake. Can ignore the
+ // boolean as the deadline will return the zero value, which will make
+ // the conn not timeout on I/O operations.
+ deadline, _ := connectCtx.Deadline()
+ rawConn.SetDeadline(deadline)
+ conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
+ rawConn.SetDeadline(time.Time{})
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
+ for _, cd := range perRPCCreds {
+ if cd.RequireTransportSecurity() {
+ if ci, ok := authInfo.(interface {
+ GetCommonAuthInfo() credentials.CommonAuthInfo
+ }); ok {
+ secLevel := ci.GetCommonAuthInfo().SecurityLevel
+ if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+ return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+ }
+ }
+ }
+ }
isSecure = true
+ if transportCreds.Info().SecurityProtocol == "tls" {
+ scheme = "https"
+ }
}
dynamicWindow := true
icwz := int32(initialWindowSize)
@@ -238,7 +297,6 @@
ctxDone: ctx.Done(), // Cache Done chan.
cancel: cancel,
userAgent: opts.UserAgent,
- md: addr.Metadata,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@@ -266,6 +324,12 @@
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
}
+
+ if md, ok := addr.Metadata.(*metadata.MD); ok {
+ t.md = *md
+ } else if md := imetadata.Get(addr); md != nil {
+ t.md = md
+ }
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
@@ -302,12 +366,14 @@
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+ t.Close(err)
+ return nil, err
}
if n != len(clientPreface) {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ t.Close(err)
+ return nil, err
}
var ss []http2.Setting
@@ -325,14 +391,16 @@
}
err = t.framer.fr.WriteSettings(ss...)
if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+ t.Close(err)
+ return nil, err
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+ t.Close(err)
+ return nil, err
}
}
@@ -345,13 +413,14 @@
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
if err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
+ }
}
- // If it's a connection error, let reader goroutine handle it
- // since there might be data in the buffers.
- if _, ok := err.(net.Error); !ok {
- t.conn.Close()
- }
+ // Do not close the transport. Let reader goroutine handle it since
+ // there might be data in the buffers.
+ t.conn.Close()
+ t.controlBuf.finish()
close(t.writerDone)
}()
return t, nil
@@ -367,6 +436,7 @@
buf: newRecvBuffer(),
headerChan: make(chan struct{}),
contentSubtype: callHdr.ContentSubtype,
+ doneFunc: callHdr.DoneFunc,
}
s.wq = newWriteQuota(defaultWriteQuota, s.done)
s.requestRead = func(n int) {
@@ -406,7 +476,7 @@
Method: callHdr.Method,
AuthInfo: t.authInfo,
}
- ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+ ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
if err != nil {
return nil, err
@@ -425,7 +495,7 @@
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
if callHdr.PreviousAttempts > 0 {
@@ -440,7 +510,7 @@
// Send out timeout regardless its value. The server can detect timeout context by itself.
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := time.Until(dl)
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
}
for k, v := range authData {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
@@ -469,25 +539,23 @@
for _, vv := range added {
for i, v := range vv {
if i%2 == 0 {
- k = v
+ k = strings.ToLower(v)
continue
}
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
- headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
- if md, ok := t.md.(*metadata.MD); ok {
- for k, vv := range *md {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
+ for k, vv := range t.md {
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
return headerFields, nil
@@ -520,7 +588,7 @@
return nil, err
}
- return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
+ return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2.
@@ -537,8 +605,11 @@
// Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied.
if callCreds := callHdr.Creds; callCreds != nil {
- if !t.isSecure && callCreds.RequireTransportSecurity() {
- return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ if callCreds.RequireTransportSecurity() {
+ ri, _ := credentials.RequestInfoFromContext(ctx)
+ if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+ return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ }
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
@@ -554,13 +625,35 @@
return callAuthData, nil
}
+// NewStreamError wraps an error and reports additional information. Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire. However, there are two notable exceptions:
+//
+// 1. If the stream headers violate the max header list size allowed by the
+// server. In this case there is no reason to retry at all, as it is
+// assumed the RPC would continue to fail on subsequent attempts.
+// 2. If the credentials errored when requesting their headers. In this case,
+// it's possible a retry can fix the problem, but indefinitely transparently
+// retrying is not appropriate as it is likely the credentials, if they can
+// eventually succeed, would need I/O to do so.
+type NewStreamError struct {
+ Err error
+
+ DoNotRetry bool
+ DoNotTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+ return e.Err.Error()
+}
+
// NewStream creates a stream and registers it into the transport as "active"
-// streams.
+// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
ctx = peer.NewContext(ctx, t.getPeer())
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
- return nil, err
+ return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
}
s := t.newStream(ctx, callHdr)
cleanup := func(err error) {
@@ -660,23 +753,23 @@
return true
}, hdr)
if err != nil {
- return nil, err
+ return nil, &NewStreamError{Err: err}
}
if success {
break
}
if hdrListSizeErr != nil {
- return nil, hdrListSizeErr
+ return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
}
firstTry = false
select {
case <-ch:
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
+ case <-ctx.Done():
+ return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
case <-t.goAway:
- return nil, errStreamDrain
+ return nil, &NewStreamError{Err: errStreamDrain}
case <-t.ctx.Done():
- return nil, ErrConnClosing
+ return nil, &NewStreamError{Err: ErrConnClosing}
}
}
if t.statsHandler != nil {
@@ -771,6 +864,9 @@
t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
// This will unblock write.
close(s.done)
+ if s.doneFunc != nil {
+ s.doneFunc()
+ }
}
// Close kicks off the shutdown process of the transport. This should be called
@@ -780,12 +876,12 @@
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
-func (t *http2Client) Close() error {
+func (t *http2Client) Close(err error) {
t.mu.Lock()
// Make sure we only Close once.
if t.state == closing {
t.mu.Unlock()
- return nil
+ return
}
// Call t.onClose before setting the state to closing to prevent the client
// from attempting to create new streams ASAP.
@@ -801,13 +897,25 @@
t.mu.Unlock()
t.controlBuf.finish()
t.cancel()
- err := t.conn.Close()
+ t.conn.Close()
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
+ // Append info about previous goaways if there were any, since this may be important
+ // for understanding why this connection was closed.
+ _, goAwayDebugMessage := t.GetGoAwayReason()
+
+ var st *status.Status
+ if len(goAwayDebugMessage) > 0 {
+ st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+ err = st.Err()
+ } else {
+ st = status.New(codes.Unavailable, err.Error())
+ }
+
// Notify all active streams.
for _, s := range streams {
- t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
+ t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
if t.statsHandler != nil {
connEnd := &stats.ConnEnd{
@@ -815,7 +923,6 @@
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
- return err
}
// GracefulClose sets the state to draining, which prevents new streams from
@@ -834,7 +941,7 @@
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
- t.Close()
+ t.Close(ErrConnClosing)
return
}
t.controlBuf.put(&incomingGoAway{})
@@ -854,18 +961,10 @@
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
+ h: hdr,
+ d: data,
}
- if hdr != nil || data != nil { // If it's not an empty data frame.
- // Add some data to grpc message header so that we can equally
- // distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
- df.h, df.d = hdr, data
- // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
+ if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
return err
}
@@ -983,7 +1082,7 @@
}
// The server has closed the stream without sending trailers. Record that
// the read direction is closed, and set the status appropriately.
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+ if f.StreamEnded() {
t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
}
}
@@ -999,7 +1098,9 @@
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
- warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ }
statusCode = codes.Unknown
}
if statusCode == codes.Canceled {
@@ -1081,12 +1182,14 @@
return
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
- infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ if logger.V(logLevel) {
+ logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ }
}
id := f.LastStreamID
- if id > 0 && id%2 != 1 {
+ if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close()
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
return
}
// A client can receive multiple GoAways from the server (see
@@ -1104,7 +1207,7 @@
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
- t.Close()
+ t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
return
}
default:
@@ -1134,7 +1237,7 @@
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
- t.Close()
+ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
}
}
@@ -1150,12 +1253,17 @@
t.goAwayReason = GoAwayTooManyPings
}
}
+ if len(f.DebugData()) == 0 {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+ } else {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+ }
}
-func (t *http2Client) GetGoAwayReason() GoAwayReason {
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
t.mu.Lock()
defer t.mu.Unlock()
- return t.goAwayReason
+ return t.goAwayReason, t.goAwayDebugMessage
}
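A hedged sketch of how the two-value form might be consumed by connection-level code (the actual caller lives outside this diff):

reason, debugMsg := t.GetGoAwayReason()
if reason == GoAwayTooManyPings {
	// back off keepalive pings before reconnecting
}
if debugMsg != "" && logger.V(logLevel) {
	logger.Infof("received GOAWAY: %s", debugMsg)
}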
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -1182,35 +1290,128 @@
return
}
- state := &decodeState{}
- // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
- state.data.isGRPC = !initialHeader
- if err := state.decodeHeader(frame); err != nil {
- t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
+ // frame.Truncated is set to true when framer detects that the current header
+ // list size hits MaxHeaderListSize limit.
+ if frame.Truncated {
+ se := status.New(codes.Internal, "peer header list size exceeded limit")
+ t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+ return
+ }
+
+ var (
+ // If a gRPC Response-Headers has already been received, then it means
+ // that the peer is speaking gRPC and we are in gRPC mode.
+ isGRPC = !initialHeader
+ mdata = make(map[string][]string)
+ contentTypeErr = "malformed header: missing HTTP content-type"
+ grpcMessage string
+ statusGen *status.Status
+ recvCompress string
+ httpStatusCode *int
+ httpStatusErr string
+ rawStatusCode = codes.Unknown
+ // headerError is set if an error is encountered while parsing the headers
+ headerError string
+ )
+
+ if initialHeader {
+ httpStatusErr = "malformed header: missing HTTP status"
+ }
+
+ for _, hf := range frame.Fields {
+ switch hf.Name {
+ case "content-type":
+ if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+ contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+ break
+ }
+ contentTypeErr = ""
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ isGRPC = true
+ case "grpc-encoding":
+ recvCompress = hf.Value
+ case "grpc-status":
+ code, err := strconv.ParseInt(hf.Value, 10, 32)
+ if err != nil {
+ se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ rawStatusCode = codes.Code(uint32(code))
+ case "grpc-message":
+ grpcMessage = decodeGrpcMessage(hf.Value)
+ case "grpc-status-details-bin":
+ var err error
+ statusGen, err = decodeGRPCStatusDetails(hf.Value)
+ if err != nil {
+ headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
+ }
+ case ":status":
+ if hf.Value == "200" {
+ httpStatusErr = ""
+ statusCode := 200
+ httpStatusCode = &statusCode
+ break
+ }
+
+ c, err := strconv.ParseInt(hf.Value, 10, 32)
+ if err != nil {
+ se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ statusCode := int(c)
+ httpStatusCode = &statusCode
+
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
+ if err != nil {
+ headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+ logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ if !isGRPC || httpStatusErr != "" {
+ var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+ if httpStatusCode != nil {
+ var ok bool
+ code, ok = HTTPStatusConvTab[*httpStatusCode]
+ if !ok {
+ code = codes.Unknown
+ }
+ }
+ var errs []string
+ if httpStatusErr != "" {
+ errs = append(errs, httpStatusErr)
+ }
+ if contentTypeErr != "" {
+ errs = append(errs, contentTypeErr)
+ }
+ // Verify the HTTP response is a 200.
+ se := status.New(code, strings.Join(errs, "; "))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+
+ if headerError != "" {
+ se := status.New(codes.Internal, headerError)
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
isHeader := false
- defer func() {
- if t.statsHandler != nil {
- if isHeader {
- inHeader := &stats.InHeader{
- Client: true,
- WireLength: int(frame.Header().Length),
- Header: s.header.Copy(),
- Compression: s.recvCompress,
- }
- t.statsHandler.HandleRPC(s.ctx, inHeader)
- } else {
- inTrailer := &stats.InTrailer{
- Client: true,
- WireLength: int(frame.Header().Length),
- Trailer: s.trailer.Copy(),
- }
- t.statsHandler.HandleRPC(s.ctx, inTrailer)
- }
- }
- }()
// If headerChan hasn't been closed yet
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
@@ -1221,9 +1422,9 @@
// These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed
// headerChan which we'll close after setting this.
- s.recvCompress = state.data.encoding
- if len(state.data.mdata) > 0 {
- s.header = state.data.mdata
+ s.recvCompress = recvCompress
+ if len(mdata) > 0 {
+ s.header = mdata
}
} else {
// HEADERS frame block carries a Trailers-Only.
@@ -1232,13 +1433,36 @@
close(s.headerChan)
}
+ if t.statsHandler != nil {
+ if isHeader {
+ inHeader := &stats.InHeader{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ Header: metadata.MD(mdata).Copy(),
+ Compression: s.recvCompress,
+ }
+ t.statsHandler.HandleRPC(s.ctx, inHeader)
+ } else {
+ inTrailer := &stats.InTrailer{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ Trailer: metadata.MD(mdata).Copy(),
+ }
+ t.statsHandler.HandleRPC(s.ctx, inTrailer)
+ }
+ }
+
if !endStream {
return
}
+ if statusGen == nil {
+ statusGen = status.New(rawStatusCode, grpcMessage)
+ }
+
// if client received END_STREAM from server while stream was still active, send RST_STREAM
rst := s.getState() == streamActive
- t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
+ t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
}
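The trailer handling above gives grpc-status-details-bin precedence over the plain grpc-status/grpc-message pair; a compact restatement as a standalone helper (a sketch for clarity, not code from the patch):

// statusGen comes from grpc-status-details-bin when present and well formed;
// rawStatusCode defaults to codes.Unknown and grpcMessage to "".
func finalStatus(statusGen *status.Status, rawStatusCode codes.Code, grpcMessage string) *status.Status {
	if statusGen != nil {
		return statusGen
	}
	return status.New(rawStatusCode, grpcMessage)
}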
// reader runs as a separate goroutine in charge of reading data from network
@@ -1252,7 +1476,8 @@
// Check the validity of server preface.
frame, err := t.framer.fr.ReadFrame()
if err != nil {
- t.Close() // this kicks off resetTransport, so must be last before return
+ err = connectionErrorf(true, err, "error reading server preface: %v", err)
+ t.Close(err) // this kicks off resetTransport, so must be last before return
return
}
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
@@ -1261,7 +1486,8 @@
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
- t.Close() // this kicks off resetTransport, so must be last before return
+ // this kicks off resetTransport, so must be last before return
+ t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
return
}
t.onPrefaceReceipt()
@@ -1285,13 +1511,19 @@
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
- msg := t.framer.fr.ErrorDetail().Error()
+ errorDetail := t.framer.fr.ErrorDetail()
+ var msg string
+ if errorDetail != nil {
+ msg = errorDetail.Error()
+ } else {
+ msg = "received invalid frame"
+ }
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
} else {
// Transport error.
- t.Close()
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
return
}
}
@@ -1311,7 +1543,9 @@
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
- errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
}
}
}
@@ -1323,7 +1557,7 @@
return b
}
-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
@@ -1348,7 +1582,7 @@
continue
}
if outstandingPing && timeoutLeft <= 0 {
- t.Close()
+ t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
return
}
t.mu.Lock()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index fa33ffb..2c6eaf0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -26,6 +26,7 @@
"io"
"math"
"net"
+ "net/http"
"strconv"
"sync"
"sync/atomic"
@@ -34,10 +35,10 @@
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
@@ -72,7 +73,6 @@
writerDone chan struct{} // sync point to enable testing.
remoteAddr net.Addr
localAddr net.Addr
- maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
framer *framer
@@ -101,11 +101,11 @@
mu sync.Mutex // guard the following
- // drainChan is initialized when drain(...) is called the first time.
+ // drainChan is initialized when Drain() is called the first time.
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
// Then an independent goroutine will be launched to later send the second GoAway.
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
- // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
+ // Thus a call to Drain() will be a no-op if drainChan is already initialized since draining is
// already underway.
drainChan chan struct{}
state transportState
@@ -122,11 +122,37 @@
bufferPool *bufferPool
connectionID uint64
+
+ // maxStreamMu guards the maximum stream ID
+ // This lock may not be taken if mu is already held.
+ maxStreamMu sync.Mutex
+ maxStreamID uint32 // max stream ID ever seen
}
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+// NewServerTransport creates an HTTP/2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and io.EOF, so the caller can recognize and suppress that case.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+ var authInfo credentials.AuthInfo
+ rawConn := conn
+ if config.Credentials != nil {
+ var err error
+ conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+ if err != nil {
+ // ErrConnDispatched means that the connection was dispatched away
+ // from gRPC; those connections should be left open. io.EOF means
+ // the connection was closed before handshaking completed, which can
+ // happen naturally from probers. Return these errors directly.
+ if err == credentials.ErrConnDispatched || err == io.EOF {
+ return nil, err
+ }
+ return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+ }
+ }
writeBufSize := config.WriteBufferSize
readBufSize := config.ReadBufferSize
maxHeaderListSize := defaultServerMaxHeaderListSize
@@ -209,14 +235,15 @@
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
+
done := make(chan struct{})
t := &http2Server{
- ctx: context.Background(),
+ ctx: setConnection(context.Background(), rawConn),
done: done,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
- authInfo: config.AuthInfo,
+ authInfo: authInfo,
framer: framer,
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
@@ -265,6 +292,14 @@
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
+ // In deployments where a gRPC server runs behind a cloud load balancer
+ // which performs regular TCP level health checks, the connection is
+ // closed immediately by the latter. Returning io.EOF here allows the
+ // grpc server implementation to recognize this scenario and suppress
+ // logging to reduce spam.
+ if err == io.EOF {
+ return nil, io.EOF
+ }
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
if !bytes.Equal(preface, clientPreface) {
@@ -289,9 +324,12 @@
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
+ }
}
t.conn.Close()
+ t.controlBuf.finish()
close(t.writerDone)
}()
go t.keepalive()
@@ -300,38 +338,144 @@
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+ // Acquire max stream ID lock for entire duration
+ t.maxStreamMu.Lock()
+ defer t.maxStreamMu.Unlock()
+
streamID := frame.Header().StreamID
- state := &decodeState{
- serverSide: true,
- }
- if err := state.decodeHeader(frame); err != nil {
- if se, ok := status.FromError(err); ok {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: statusCodeConvTab[se.Code()],
- onWrite: func() {},
- })
- }
+
+ // frame.Truncated is set to true when framer detects that the current header
+ // list size hits MaxHeaderListSize limit.
+ if frame.Truncated {
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeFrameSize,
+ onWrite: func() {},
+ })
return false
}
+ if streamID%2 != 1 || streamID <= t.maxStreamID {
+ // illegal gRPC stream id.
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+ }
+ return true
+ }
+ t.maxStreamID = streamID
+
buf := newRecvBuffer()
s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- recvCompress: state.data.encoding,
- method: state.data.method,
- contentSubtype: state.data.contentSubtype,
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
+ var (
+ // isGRPC is set to true once a valid gRPC content-type header is seen,
+ // i.e. once we know the peer is speaking gRPC rather than plain HTTP.
+ isGRPC = false
+ mdata = make(map[string][]string)
+ httpMethod string
+ // headerError is set if an error is encountered while parsing the headers
+ headerError bool
+
+ timeoutSet bool
+ timeout time.Duration
+ )
+
+ for _, hf := range frame.Fields {
+ switch hf.Name {
+ case "content-type":
+ contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+ if !validContentType {
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ s.contentSubtype = contentSubtype
+ isGRPC = true
+ case "grpc-encoding":
+ s.recvCompress = hf.Value
+ case ":method":
+ httpMethod = hf.Value
+ case ":path":
+ s.method = hf.Value
+ case "grpc-timeout":
+ timeoutSet = true
+ var err error
+ if timeout, err = decodeTimeout(hf.Value); err != nil {
+ headerError = true
+ }
+ // "Transports must consider requests containing the Connection header
+ // as malformed." - A41
+ case "connection":
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
+ }
+ headerError = true
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
+ if err != nil {
+ headerError = true
+ logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ // "If multiple Host headers or multiple :authority headers are present, the
+ // request must be rejected with an HTTP status code 400 as required by Host
+ // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+ // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is an HTTP/2
+ // error, this takes precedence over a client not speaking gRPC.
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+ errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+ if logger.V(logLevel) {
+ logger.Errorf("transport: %v", errMsg)
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 400,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.Internal, errMsg),
+ })
+ return false
+ }
+
+ if !isGRPC || headerError {
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeProtocol,
+ onWrite: func() {},
+ })
+ return false
+ }
+
+ // "If :authority is missing, Host must be renamed to :authority." - A41
+ if len(mdata[":authority"]) == 0 {
+ // No-op if host isn't present; an RPC that ends up with no :authority
+ // header at all is still valid.
+ if host, ok := mdata["host"]; ok {
+ mdata[":authority"] = host
+ delete(mdata, "host")
+ }
+ } else {
+ // "If :authority is present, Host must be discarded" - A41
+ delete(mdata, "host")
+ }
+
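The host/:authority handling above can be read as the following standalone helper (a sketch under the same A41 rules, not code from the patch):

// normalizeAuthority applies gRFC A41: if :authority is absent, promote host;
// either way, at most one of the two survives, under the :authority key.
func normalizeAuthority(mdata map[string][]string) {
	if len(mdata[":authority"]) == 0 {
		if host, ok := mdata["host"]; ok {
			mdata[":authority"] = host
		}
	}
	delete(mdata, "host")
}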
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
- if state.data.timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
+ if timeoutSet {
+ s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
@@ -344,31 +488,13 @@
}
s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context.
- if len(state.data.mdata) > 0 {
- s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
- }
- if state.data.statsTags != nil {
- s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
- }
- if state.data.statsTrace != nil {
- s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
- }
- if t.inTapHandle != nil {
- var err error
- info := &tap.Info{
- FullMethodName: state.data.method,
+ if len(mdata) > 0 {
+ s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+ if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+ s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
}
- s.ctx, err = t.inTapHandle(s.ctx, info)
- if err != nil {
- warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
- t.controlBuf.put(&cleanupStream{
- streamID: s.id,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- s.cancel()
- return false
+ if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+ s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
}
}
t.mu.Lock()
@@ -388,14 +514,40 @@
s.cancel()
return false
}
- if streamID%2 != 1 || streamID <= t.maxStreamID {
+ if httpMethod != http.MethodPost {
t.mu.Unlock()
- // illegal gRPC stream id.
- errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+ if logger.V(logLevel) {
+ logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
+ }
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeProtocol,
+ onWrite: func() {},
+ })
s.cancel()
- return true
+ return false
}
- t.maxStreamID = streamID
+ if t.inTapHandle != nil {
+ var err error
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+ t.mu.Unlock()
+ if logger.V(logLevel) {
+ logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
+ }
+ stat, ok := status.FromError(err)
+ if !ok {
+ stat = status.New(codes.PermissionDenied, err.Error())
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 200,
+ streamID: s.id,
+ contentSubtype: s.contentSubtype,
+ status: stat,
+ })
+ return false
+ }
+ }
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
@@ -417,7 +569,7 @@
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
- Header: metadata.MD(state.data.mdata).Copy(),
+ Header: metadata.MD(mdata).Copy(),
}
t.stats.HandleRPC(s.ctx, inHeader)
}
@@ -454,7 +606,9 @@
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
- warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+ }
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
@@ -474,7 +628,9 @@
t.Close()
return
}
- warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ }
t.Close()
return
}
@@ -497,7 +653,9 @@
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
- errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ }
}
}
}
@@ -599,6 +757,10 @@
if !ok {
return
}
+ if s.getState() == streamReadDone {
+ t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+ return
+ }
if size > 0 {
if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@@ -619,7 +781,7 @@
s.write(recvMsg{buffer: buffer})
}
}
- if f.Header().Flags.Has(http2.FlagDataEndStream) {
+ if f.StreamEnded() {
// Received the end of stream from the client.
s.compareAndSwapState(streamActive, streamReadDone)
s.write(recvMsg{err: io.EOF})
@@ -719,7 +881,9 @@
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
- errorf("transport: Got too many pings from the client, closing the connection.")
+ if logger.V(logLevel) {
+ logger.Errorf("transport: Got too many pings from the client, closing the connection.")
+ }
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
}
}
@@ -752,7 +916,9 @@
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ if logger.V(logLevel) {
+ logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ }
return false
}
}
@@ -789,7 +955,7 @@
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
if s.sendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
@@ -839,7 +1005,7 @@
}
} else { // Send a trailer only response.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
}
}
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
@@ -849,7 +1015,7 @@
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
- grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+ logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
} else {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
@@ -909,13 +1075,6 @@
return ContextErr(s.ctx.Err())
}
}
- // Add some data to header frame so that we can equally distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
df := &dataFrame{
streamID: s.id,
h: hdr,
@@ -977,17 +1136,19 @@
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
- t.drain(http2.ErrCodeNo, []byte{})
+ t.Drain()
return
}
idleTimer.Reset(val)
case <-ageTimer.C:
- t.drain(http2.ErrCodeNo, []byte{})
+ t.Drain()
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-ageTimer.C:
// Close the connection after grace period.
- infof("transport: closing server transport due to maximum connection age.")
+ if logger.V(logLevel) {
+ logger.Infof("transport: closing server transport due to maximum connection age.")
+ }
t.Close()
case <-t.done:
}
@@ -1004,7 +1165,9 @@
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
- infof("transport: closing server transport due to idleness.")
+ if logger.V(logLevel) {
+ logger.Infof("transport: closing server transport due to idleness.")
+ }
t.Close()
return
}
@@ -1032,11 +1195,11 @@
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() error {
+func (t *http2Server) Close() {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return errors.New("transport: Close() was already called")
+ return
}
t.state = closing
streams := t.activeStreams
@@ -1044,7 +1207,9 @@
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
- err := t.conn.Close()
+ if err := t.conn.Close(); err != nil && logger.V(logLevel) {
+ logger.Infof("transport: error closing conn during Close: %v", err)
+ }
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
@@ -1056,7 +1221,6 @@
connEnd := &stats.ConnEnd{}
t.stats.HandleConn(t.ctx, connEnd)
}
- return err
}
// deleteStream deletes the stream s from transport's active streams.
@@ -1121,17 +1285,13 @@
}
func (t *http2Server) Drain() {
- t.drain(http2.ErrCodeNo, []byte{})
-}
-
-func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainChan != nil {
return
}
t.drainChan = make(chan struct{})
- t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
}
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@@ -1139,20 +1299,23 @@
// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.maxStreamMu.Lock()
t.mu.Lock()
if t.state == closing { // TODO(mmukhi): This seems unnecessary.
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
// The transport is closing.
return false, ErrConnClosing
}
- sid := t.maxStreamID
if !g.headsUp {
// Stop accepting more streams now.
t.state = draining
+ sid := t.maxStreamID
if len(t.activeStreams) == 0 {
g.closeConn = true
}
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
@@ -1165,6 +1328,7 @@
return true, nil
}
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
// Follow that with a ping and wait for the ack to come back or a timer
// to expire. During this time accept new streams since they might have
@@ -1249,3 +1413,18 @@
j := grpcrand.Int63n(2*r) - r
return time.Duration(j)
}
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+ conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+ return conn
+}
+
+// SetConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func setConnection(ctx context.Context, conn net.Conn) context.Context {
+ return context.WithValue(ctx, connectionKey{}, conn)
+}
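A usage sketch for the new context helper; the function below is hypothetical and assumes it runs on the server side where setConnection has populated the context:

func peerConnInfo(ctx context.Context) string {
	if conn := GetConnection(ctx); conn != nil {
		return conn.LocalAddr().String() + " <- " + conn.RemoteAddr().String()
	}
	return "no connection in context"
}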
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 8f5f334..d8247bc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -27,6 +27,7 @@
"math"
"net"
"net/http"
+ "net/url"
"strconv"
"strings"
"time"
@@ -37,6 +38,7 @@
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
@@ -50,7 +52,7 @@
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
- baseContentType = "application/grpc"
+
)
var (
@@ -71,13 +73,6 @@
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
http2.ErrCodeHTTP11Required: codes.Internal,
}
- statusCodeConvTab = map[codes.Code]http2.ErrCode{
- codes.Internal: http2.ErrCodeInternal,
- codes.Canceled: http2.ErrCodeCancel,
- codes.Unavailable: http2.ErrCodeRefusedStream,
- codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
- codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
- }
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
HTTPStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
@@ -97,54 +92,9 @@
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
+ logger = grpclog.Component("transport")
)
-type parsedHeaderData struct {
- encoding string
- // statusGen caches the stream status received from the trailer the server
- // sent. Client side only. Do not access directly. After all trailers are
- // parsed, use the status method to retrieve the status.
- statusGen *status.Status
- // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
- // intended for direct access outside of parsing.
- rawStatusCode *int
- rawStatusMsg string
- httpStatus *int
- // Server side only fields.
- timeoutSet bool
- timeout time.Duration
- method string
- // key-value metadata map from the peer.
- mdata map[string][]string
- statsTags []byte
- statsTrace []byte
- contentSubtype string
-
- // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
- //
- // We are in gRPC mode (peer speaking gRPC) if:
- // * We are client side and have already received a HEADER frame that indicates gRPC peer.
- // * The header contains valid a content-type, i.e. a string starts with "application/grpc"
- // And we should handle error specific to gRPC.
- //
- // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
- // are in HTTP fallback mode, and should handle error specific to HTTP.
- isGRPC bool
- grpcErr error
- httpErr error
- contentTypeErr string
-}
-
-// decodeState configures decoding criteria and records the decoded data.
-type decodeState struct {
- // whether decoding on server side or not
- serverSide bool
-
- // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
- // frame once decodeHeader function has been invoked and returned.
- data parsedHeaderData
-}
-
// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
@@ -182,54 +132,6 @@
}
}
-// contentSubtype returns the content-subtype for the given content-type. The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
- if contentType == baseContentType {
- return "", true
- }
- if !strings.HasPrefix(contentType, baseContentType) {
- return "", false
- }
- // guaranteed since != baseContentType and has baseContentType prefix
- switch contentType[len(baseContentType)] {
- case '+', ';':
- // this will return true for "application/grpc+" or "application/grpc;"
- // which the previous validContentType function tested to be valid, so we
- // just say that no content-subtype is specified in this case
- return contentType[len(baseContentType)+1:], true
- default:
- return "", false
- }
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
- if contentSubtype == "" {
- return baseContentType
- }
- return baseContentType + "+" + contentSubtype
-}
-
-func (d *decodeState) status() *status.Status {
- if d.data.statusGen == nil {
- // No status-details were provided; generate status using code/msg.
- d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
- }
- return d.data.statusGen
-}
-
const binHdrSuffix = "-bin"
func encodeBinHeader(v []byte) string {
@@ -259,164 +161,16 @@
return v, nil
}
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
- // frame.Truncated is set to true when framer detects that the current header
- // list size hits MaxHeaderListSize limit.
- if frame.Truncated {
- return status.Error(codes.Internal, "peer header list size exceeded limit")
+func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
+ v, err := decodeBinHeader(rawDetails)
+ if err != nil {
+ return nil, err
}
-
- for _, hf := range frame.Fields {
- d.processHeaderField(hf)
+ st := &spb.Status{}
+ if err = proto.Unmarshal(v, st); err != nil {
+ return nil, err
}
-
- if d.data.isGRPC {
- if d.data.grpcErr != nil {
- return d.data.grpcErr
- }
- if d.serverSide {
- return nil
- }
- if d.data.rawStatusCode == nil && d.data.statusGen == nil {
- // gRPC status doesn't exist.
- // Set rawStatusCode to be unknown and return nil error.
- // So that, if the stream has ended this Unknown status
- // will be propagated to the user.
- // Otherwise, it will be ignored. In which case, status from
- // a later trailer, that has StreamEnded flag set, is propagated.
- code := int(codes.Unknown)
- d.data.rawStatusCode = &code
- }
- return nil
- }
-
- // HTTP fallback mode
- if d.data.httpErr != nil {
- return d.data.httpErr
- }
-
- var (
- code = codes.Internal // when header does not include HTTP status, return INTERNAL
- ok bool
- )
-
- if d.data.httpStatus != nil {
- code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
- if !ok {
- code = codes.Unknown
- }
- }
-
- return status.Error(code, d.constructHTTPErrMsg())
-}
-
-// constructErrMsg constructs error message to be returned in HTTP fallback mode.
-// Format: HTTP status code and its corresponding message + content-type error message.
-func (d *decodeState) constructHTTPErrMsg() string {
- var errMsgs []string
-
- if d.data.httpStatus == nil {
- errMsgs = append(errMsgs, "malformed header: missing HTTP status")
- } else {
- errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
- }
-
- if d.data.contentTypeErr == "" {
- errMsgs = append(errMsgs, "transport: missing content-type field")
- } else {
- errMsgs = append(errMsgs, d.data.contentTypeErr)
- }
-
- return strings.Join(errMsgs, "; ")
-}
-
-func (d *decodeState) addMetadata(k, v string) {
- if d.data.mdata == nil {
- d.data.mdata = make(map[string][]string)
- }
- d.data.mdata[k] = append(d.data.mdata[k], v)
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
- switch f.Name {
- case "content-type":
- contentSubtype, validContentType := contentSubtype(f.Value)
- if !validContentType {
- d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
- return
- }
- d.data.contentSubtype = contentSubtype
- // TODO: do we want to propagate the whole content-type in the metadata,
- // or come up with a way to just propagate the content-subtype if it was set?
- // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
- // in the metadata?
- d.addMetadata(f.Name, f.Value)
- d.data.isGRPC = true
- case "grpc-encoding":
- d.data.encoding = f.Value
- case "grpc-status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
- return
- }
- d.data.rawStatusCode = &code
- case "grpc-message":
- d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
- case "grpc-status-details-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- return
- }
- s := &spb.Status{}
- if err := proto.Unmarshal(v, s); err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- return
- }
- d.data.statusGen = status.FromProto(s)
- case "grpc-timeout":
- d.data.timeoutSet = true
- var err error
- if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
- }
- case ":path":
- d.data.method = f.Value
- case ":status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
- return
- }
- d.data.httpStatus = &code
- case "grpc-tags-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
- return
- }
- d.data.statsTags = v
- d.addMetadata(f.Name, string(v))
- case "grpc-trace-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
- return
- }
- d.data.statsTrace = v
- d.addMetadata(f.Name, string(v))
- default:
- if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
- break
- }
- v, err := decodeMetadataHeader(f.Name, f.Value)
- if err != nil {
- errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
- return
- }
- d.addMetadata(f.Name, v)
- }
+ return status.FromProto(st), nil
}
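A small round-trip sketch (illustrative; stBytes is assumed to be proto.Marshal of an spb.Status, as produced on the server side when writing grpc-status-details-bin):

st, err := decodeGRPCStatusDetails(encodeBinHeader(stBytes))
if err == nil {
	_ = st.Code() // the decoded *status.Status carries code, message and any detail protos
}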
type timeoutUnit uint8
@@ -449,41 +203,6 @@
return
}
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and round-up the result. Note that this is
-// equivalent to (d+r-1)/r but has less chance to overflow.
-func div(d, r time.Duration) int64 {
- if m := d % r; m > 0 {
- return int64(d/r + 1)
- }
- return int64(d / r)
-}
-
-// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
-func encodeTimeout(t time.Duration) string {
- if t <= 0 {
- return "0n"
- }
- if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "n"
- }
- if d := div(t, time.Microsecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "u"
- }
- if d := div(t, time.Millisecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "m"
- }
- if d := div(t, time.Second); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "S"
- }
- if d := div(t, time.Minute); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "M"
- }
- // Note that maxTimeoutValue * time.Hour > MaxInt64.
- return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
func decodeTimeout(s string) (time.Duration, error) {
size := len(s)
if size < 2 {
@@ -675,3 +394,31 @@
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
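Some expected mappings, worked out from the code above (illustrative, not a test shipped with the patch):

network, address := parseDialTarget("unix:///var/run/app.sock") // -> "unix", "/var/run/app.sock"
network, address = parseDialTarget("unix:relative.sock")        // -> "unix", "relative.sock"
network, address = parseDialTarget("10.0.0.1:8080")             // -> "tcp", "10.0.0.1:8080"
_, _ = network, address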
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
deleted file mode 100644
index 879df80..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Infof(format, args...)
- }
-}
-
-func warningf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Warningf(format, args...)
- }
-}
-
-func errorf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Errorf(format, args...)
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 0000000..c11b527
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. Attribute of a resolver.Address.
+package networktype
+
+import (
+ "google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+ address.Attributes = address.Attributes.WithValue(key, networkType)
+ return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+ v := address.Attributes.Value(key)
+ if v == nil {
+ return "", false
+ }
+ return v.(string), true
+}
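A usage sketch, assuming a resolver that knows the address is a unix socket and the default dialer later reading the tag back:

addr := resolver.Address{Addr: "/var/run/app.sock"}
addr = networktype.Set(addr, "unix")

if network, ok := networktype.Get(addr); ok {
	_ = network // "unix"; the dialer can pass this as the network argument to net.Dial
}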
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 0000000..4159619
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,142 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "bufio"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+ // The following variable will be overwritten in the tests.
+ httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(address string) (*url.URL, error) {
+ req := &http.Request{
+ URL: &url.URL{
+ Scheme: "https",
+ Host: address,
+ },
+ }
+ url, err := httpProxyFromEnvironment(req)
+ if err != nil {
+ return nil, err
+ }
+ return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+ net.Conn
+ r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+ auth := username + ":" + password
+ return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+
+ req := &http.Request{
+ Method: http.MethodConnect,
+ URL: &url.URL{Host: backendAddr},
+ Header: map[string][]string{"User-Agent": {grpcUA}},
+ }
+ if t := proxyURL.User; t != nil {
+ u := t.Username()
+ p, _ := t.Password()
+ req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+ }
+
+ if err := sendHTTPRequest(ctx, req, conn); err != nil {
+ return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+
+ r := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(r, req)
+ if err != nil {
+ return nil, fmt.Errorf("reading server HTTP response: %v", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ dump, err := httputil.DumpResponse(resp, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+ }
+ return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+ }
+
+ return &bufConn{Conn: conn, r: r}, nil
+}
+
+// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
+// is necessary, dials, does the HTTP CONNECT handshake, and returns the
+// connection.
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+ newAddr := addr
+ proxyURL, err := mapAddress(addr)
+ if err != nil {
+ return nil, err
+ }
+ if proxyURL != nil {
+ newAddr = proxyURL.Host
+ }
+
+ conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+ if err != nil {
+ return
+ }
+ if proxyURL != nil {
+ // proxy is disabled if proxyURL is nil.
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ }
+ return
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+ req = req.WithContext(ctx)
+ if err := req.Write(conn); err != nil {
+ return fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+ return nil
+}
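A hedged usage sketch: whether the CONNECT handshake happens is decided by http.ProxyFromEnvironment (HTTPS_PROXY / NO_PROXY), since mapAddress builds an https:// request for the backend address. The host names below are placeholders:

// With HTTPS_PROXY=http://user:pass@proxy.internal:3128 set, this is expected to
// dial the proxy, issue CONNECT backend.example.com:443, and return a tunneled
// connection; with no proxy configured it dials the backend directly.
conn, err := proxyDial(ctx, "backend.example.com:443", "grpc-go-client")
if err == nil {
	defer conn.Close()
}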
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index a30da9e..d3bf65b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -30,16 +30,20 @@
"net"
"sync"
"sync/atomic"
+ "time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
+const logLevel = 2
+
type bufferPool struct {
pool sync.Pool
}
@@ -238,6 +242,7 @@
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
+ doneFunc func() // invoked at the end of stream on client side.
ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
method string // the associated RPC method of the stream
recvCompress string
@@ -514,7 +519,8 @@
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
MaxStreams uint32
- AuthInfo credentials.AuthInfo
+ ConnectionTimeout time.Duration
+ Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
@@ -528,12 +534,6 @@
HeaderTableSize *uint32
}
-// NewServerTransport creates a ServerTransport with conn or non-nil error
-// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
- return newHTTP2Server(conn, config)
-}
-
// ConnectOptions covers all relevant options for communicating with the server.
type ConnectOptions struct {
// UserAgent is the application user agent.
@@ -566,19 +566,14 @@
ChannelzParentID int64
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
-}
-
-// TargetInfo contains the information of the target such as network address and metadata.
-type TargetInfo struct {
- Addr string
- Metadata interface{}
- Authority string
+ // UseProxy specifies whether a proxy should be used for this connection.
+ UseProxy bool
}
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
- return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
}
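A hedged sketch of the new call shape now that the removed TargetInfo is replaced by resolver.Address. dialBackend is a hypothetical helper assumed to live in the transport package with context and resolver imported; the address, server name and user agent are illustrative values.

func dialBackend(connectCtx, ctx context.Context) (ClientTransport, error) {
    addr := resolver.Address{
        Addr:       "10.0.0.1:443",        // illustrative backend address
        ServerName: "backend.example.com", // authority / TLS server name
    }
    opts := ConnectOptions{
        UserAgent: "example-agent",
        UseProxy:  true, // new knob: proxies are skipped entirely when false
    }
    return NewClientTransport(connectCtx, ctx, addr, opts,
        func() {},             // onPrefaceReceipt
        func(GoAwayReason) {}, // onGoAway
        func() {},             // onClose
    )
}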
// Options provides additional hints and information for message
@@ -613,6 +608,8 @@
ContentSubtype string
PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+ DoneFunc func() // called when the stream is finished
}
// ClientTransport is the common interface for all gRPC client-side transport
@@ -621,7 +618,7 @@
// Close tears down this transport. Once it returns, the transport
// should not be accessed any more. The caller must make sure this
// is called only once.
- Close() error
+ Close(err error)
// GracefulClose starts to tear down the transport: the transport will stop
// accepting new RPCs and NewStream will return error. Once all streams are
@@ -655,8 +652,9 @@
// HTTP/2).
GoAway() <-chan struct{}
- // GetGoAwayReason returns the reason why GoAway frame was received.
- GetGoAwayReason() GoAwayReason
+ // GetGoAwayReason returns the reason the GOAWAY frame was received, along
+ // with a human-readable string carrying the frame's debug data.
+ GetGoAwayReason() (GoAwayReason, string)
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
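A small sketch of how a caller consumes the new two-value form; ct is an assumed ClientTransport and the reaction shown is illustrative, not part of this diff.

reason, debugData := ct.GetGoAwayReason()
if reason == GoAwayTooManyPings {
    // The server asked the client to slow down keepalive pings; debugData is
    // the opaque debug string carried by the GOAWAY frame, useful in logs.
    fmt.Printf("GOAWAY too_many_pings, debug data: %q\n", debugData)
}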
@@ -692,7 +690,7 @@
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
- Close() error
+ Close()
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
new file mode 100644
index 0000000..e8b4927
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package internal
+
+import (
+ "google.golang.org/grpc/attributes"
+ "google.golang.org/grpc/resolver"
+)
+
+// handshakeClusterNameKey is the type used as the key to store cluster name in
+// the Attributes field of resolver.Address.
+type handshakeClusterNameKey struct{}
+
+// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
+// is updated with the cluster name.
+func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
+ return addr
+}
+
+// GetXDSHandshakeClusterName returns the cluster name stored in attr, along
+// with a bool reporting whether one was present.
+func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
+ v := attr.Value(handshakeClusterNameKey{})
+ name, ok := v.(string)
+ return name, ok
+}
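A usage sketch of the new helpers, assuming a caller elsewhere inside the grpc module (google.golang.org/grpc/internal cannot be imported from outside it); the address and cluster name are illustrative.

addr := resolver.Address{Addr: "10.1.2.3:8080"}
addr = internal.SetXDSHandshakeClusterName(addr, "cluster-backend-a")

// Later, e.g. from a credentials handshaker that receives the address:
if cluster, ok := internal.GetXDSHandshakeClusterName(addr.Attributes); ok {
    fmt.Printf("handshaking with cluster %q\n", cluster)
}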