Update to latest protos and device-management interface; release 2.0
Change-Id: I2d2ebf5b305d6d06b8d01c49d4d67e7ff050f5d4
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
index 4f3526e..191bea4 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
@@ -1,57 +1,119 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
// source: google/api/annotations.proto
package annotations
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
- proto "github.com/golang/protobuf/proto"
- descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-var E_Http = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MethodOptions)(nil),
- ExtensionType: (*HttpRule)(nil),
- Field: 72295728,
- Name: "google.api.http",
- Tag: "bytes,72295728,opt,name=http",
- Filename: "google/api/annotations.proto",
+var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*HttpRule)(nil),
+ Field: 72295728,
+ Name: "google.api.http",
+ Tag: "bytes,72295728,opt,name=http",
+ Filename: "google/api/annotations.proto",
+ },
}
-func init() {
- proto.RegisterExtension(E_Http)
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // See `HttpRule`.
+ //
+ // optional google.api.HttpRule http = 72295728;
+ E_Http = &file_google_api_annotations_proto_extTypes[0]
+)
+
+var File_google_api_annotations_proto protoreflect.FileDescriptor
+
+var file_google_api_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70,
+ 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func init() {
- proto.RegisterFile("google/api/annotations.proto", fileDescriptor_c591c5aa9fb79aab)
+var file_google_api_annotations_proto_goTypes = []interface{}{
+ (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions
+ (*HttpRule)(nil), // 1: google.api.HttpRule
+}
+var file_google_api_annotations_proto_depIdxs = []int32{
+ 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions
+ 1, // 1: google.api.http:type_name -> google.api.HttpRule
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var fileDescriptor_c591c5aa9fb79aab = []byte{
- // 208 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
- 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
- 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
- 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
- 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
- 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
- 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
- 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
- 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5,
- 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64,
- 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d,
- 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00,
+func init() { file_google_api_annotations_proto_init() }
+func file_google_api_annotations_proto_init() {
+ if File_google_api_annotations_proto != nil {
+ return
+ }
+ file_google_api_http_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_annotations_proto_goTypes,
+ DependencyIndexes: file_google_api_annotations_proto_depIdxs,
+ ExtensionInfos: file_google_api_annotations_proto_extTypes,
+ }.Build()
+ File_google_api_annotations_proto = out.File
+ file_google_api_annotations_proto_rawDesc = nil
+ file_google_api_annotations_proto_goTypes = nil
+ file_google_api_annotations_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index 9757593..66fdb65 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -1,81 +1,214 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
// source: google/api/client.proto
package annotations
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
- proto "github.com/golang/protobuf/proto"
- descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-var E_MethodSignature = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MethodOptions)(nil),
- ExtensionType: ([]string)(nil),
- Field: 1051,
- Name: "google.api.method_signature",
- Tag: "bytes,1051,rep,name=method_signature",
- Filename: "google/api/client.proto",
+var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: ([]string)(nil),
+ Field: 1051,
+ Name: "google.api.method_signature",
+ Tag: "bytes,1051,rep,name=method_signature",
+ Filename: "google/api/client.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 1049,
+ Name: "google.api.default_host",
+ Tag: "bytes,1049,opt,name=default_host",
+ Filename: "google/api/client.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 1050,
+ Name: "google.api.oauth_scopes",
+ Tag: "bytes,1050,opt,name=oauth_scopes",
+ Filename: "google/api/client.proto",
+ },
}
-var E_DefaultHost = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.ServiceOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 1049,
- Name: "google.api.default_host",
- Tag: "bytes,1049,opt,name=default_host",
- Filename: "google/api/client.proto",
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // A definition of a client library method signature.
+ //
+ // In client libraries, each proto RPC corresponds to one or more methods
+ // which the end user is able to call, and calls the underlying RPC.
+ // Normally, this method receives a single argument (a struct or instance
+ // corresponding to the RPC request object). Defining this field will
+ // add one or more overloads providing flattened or simpler method signatures
+ // in some languages.
+ //
+ // The fields on the method signature are provided as a comma-separated
+ // string.
+ //
+ // For example, the proto RPC and annotation:
+ //
+ // rpc CreateSubscription(CreateSubscriptionRequest)
+ // returns (Subscription) {
+ // option (google.api.method_signature) = "name,topic";
+ // }
+ //
+ // Would add the following Java overload (in addition to the method accepting
+ // the request object):
+ //
+ // public final Subscription createSubscription(String name, String topic)
+ //
+ // The following backwards-compatibility guidelines apply:
+ //
+ // * Adding this annotation to an unannotated method is backwards
+ // compatible.
+ // * Adding this annotation to a method which already has existing
+ // method signature annotations is backwards compatible if and only if
+ // the new method signature annotation is last in the sequence.
+ // * Modifying or removing an existing method signature annotation is
+ // a breaking change.
+ // * Re-ordering existing method signature annotations is a breaking
+ // change.
+ //
+ // repeated string method_signature = 1051;
+ E_MethodSignature = &file_google_api_client_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // The hostname for this service.
+ // This should be specified with no prefix or protocol.
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.default_host) = "foo.googleapi.com";
+ // ...
+ // }
+ //
+ // optional string default_host = 1049;
+ E_DefaultHost = &file_google_api_client_proto_extTypes[1]
+ // OAuth scopes needed for the client.
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.oauth_scopes) = \
+ // "https://www.googleapis.com/auth/cloud-platform";
+ // ...
+ // }
+ //
+ // If there is more than one scope, use a comma-separated string:
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.oauth_scopes) = \
+ // "https://www.googleapis.com/auth/cloud-platform,"
+ // "https://www.googleapis.com/auth/monitoring";
+ // ...
+ // }
+ //
+ // optional string oauth_scopes = 1050;
+ E_OauthScopes = &file_google_api_client_proto_extTypes[2]
+)
+
+var File_google_api_client_proto protoreflect.FileDescriptor
+
+var file_google_api_client_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68,
+ 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74,
+ 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a,
+ 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42,
+ 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
+ 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-var E_OauthScopes = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.ServiceOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 1050,
- Name: "google.api.oauth_scopes",
- Tag: "bytes,1050,opt,name=oauth_scopes",
- Filename: "google/api/client.proto",
+var file_google_api_client_proto_goTypes = []interface{}{
+ (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions
+ (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions
+}
+var file_google_api_client_proto_depIdxs = []int32{
+ 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+ 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+ 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 0, // [0:3] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-func init() {
- proto.RegisterExtension(E_MethodSignature)
- proto.RegisterExtension(E_DefaultHost)
- proto.RegisterExtension(E_OauthScopes)
-}
-
-func init() {
- proto.RegisterFile("google/api/client.proto", fileDescriptor_78f2c6f7c3a942c1)
-}
-
-var fileDescriptor_78f2c6f7c3a942c1 = []byte{
- // 262 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x3f, 0x4f, 0xc3, 0x30,
- 0x10, 0xc5, 0x55, 0x40, 0xa8, 0x75, 0x11, 0xa0, 0x2c, 0x20, 0x06, 0xc8, 0xd8, 0xc9, 0x1e, 0xd8,
- 0xca, 0xd4, 0x76, 0xe0, 0x8f, 0x84, 0x88, 0x9a, 0x8d, 0x25, 0x72, 0x9d, 0xab, 0x63, 0x29, 0xf5,
- 0x59, 0xf6, 0x85, 0xef, 0x02, 0x6c, 0x7c, 0x52, 0x54, 0xc7, 0x11, 0x48, 0x0c, 0x6c, 0x27, 0xbd,
- 0xf7, 0xfb, 0x9d, 0xf4, 0xd8, 0x85, 0x46, 0xd4, 0x2d, 0x08, 0xe9, 0x8c, 0x50, 0xad, 0x01, 0x4b,
- 0xdc, 0x79, 0x24, 0xcc, 0x58, 0x1f, 0x70, 0xe9, 0xcc, 0x55, 0x9e, 0x4a, 0x31, 0xd9, 0x74, 0x5b,
- 0x51, 0x43, 0x50, 0xde, 0x38, 0x42, 0xdf, 0xb7, 0xe7, 0x4f, 0xec, 0x7c, 0x07, 0xd4, 0x60, 0x5d,
- 0x05, 0xa3, 0xad, 0xa4, 0xce, 0x43, 0x76, 0xcd, 0x93, 0x62, 0xc0, 0xf8, 0x73, 0xac, 0xbc, 0x38,
- 0x32, 0x68, 0xc3, 0xe5, 0xe7, 0x38, 0x3f, 0x9c, 0x4d, 0xd6, 0x67, 0x3d, 0x58, 0x0e, 0xdc, 0x7c,
- 0xc5, 0x4e, 0x6a, 0xd8, 0xca, 0xae, 0xa5, 0xaa, 0xc1, 0x40, 0xd9, 0xcd, 0x1f, 0x4f, 0x09, 0xfe,
- 0xcd, 0x28, 0x18, 0x44, 0xef, 0xe3, 0x7c, 0x34, 0x9b, 0xac, 0xa7, 0x89, 0x7a, 0xc0, 0x40, 0x7b,
- 0x09, 0xca, 0x8e, 0x9a, 0x2a, 0x28, 0x74, 0x10, 0xfe, 0x97, 0x7c, 0x24, 0x49, 0xa4, 0xca, 0x08,
- 0x2d, 0x0d, 0x3b, 0x55, 0xb8, 0xe3, 0x3f, 0x4b, 0x2c, 0xa7, 0xab, 0xb8, 0x51, 0xb1, 0x97, 0x14,
- 0xa3, 0xd7, 0x45, 0x8a, 0x34, 0xb6, 0xd2, 0x6a, 0x8e, 0x5e, 0x0b, 0x0d, 0x36, 0xbe, 0x10, 0x7d,
- 0x24, 0x9d, 0x09, 0x71, 0x5c, 0x69, 0x2d, 0x92, 0x8c, 0xbf, 0xee, 0x7e, 0xdd, 0x5f, 0x07, 0x47,
- 0xf7, 0x8b, 0xe2, 0x71, 0x73, 0x1c, 0xa1, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xc2,
- 0xcf, 0x71, 0x90, 0x01, 0x00, 0x00,
+func init() { file_google_api_client_proto_init() }
+func file_google_api_client_proto_init() {
+ if File_google_api_client_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_client_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 3,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_client_proto_goTypes,
+ DependencyIndexes: file_google_api_client_proto_depIdxs,
+ ExtensionInfos: file_google_api_client_proto_extTypes,
+ }.Build()
+ File_google_api_client_proto = out.File
+ file_google_api_client_proto_rawDesc = nil
+ file_google_api_client_proto_goTypes = nil
+ file_google_api_client_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
index 6e67a93..164e0df 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
@@ -1,26 +1,40 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.27.1
+// protoc v3.12.2
// source: google/api/field_behavior.proto
package annotations
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
+ sync "sync"
- proto "github.com/golang/protobuf/proto"
- descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// An indicator of the behavior of a given field (for example, that a field
// is required in requests, or given as output but ignored as input).
@@ -54,71 +68,183 @@
// This indicates that the field may be set once in a request to create a
// resource, but may not be changed thereafter.
FieldBehavior_IMMUTABLE FieldBehavior = 5
+ // Denotes that a (repeated) field is an unordered list.
+ // This indicates that the service may provide the elements of the list
+ // in any arbitrary order, rather than the order the user originally
+ // provided. Additionally, the list's order may or may not be stable.
+ FieldBehavior_UNORDERED_LIST FieldBehavior = 6
+ // Denotes that this field returns a non-empty default value if not set.
+ // This indicates that if the user provides the empty value in a request,
+ // a non-empty value will be returned. The user will not be aware of what
+ // non-empty value to expect.
+ FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
)
-var FieldBehavior_name = map[int32]string{
- 0: "FIELD_BEHAVIOR_UNSPECIFIED",
- 1: "OPTIONAL",
- 2: "REQUIRED",
- 3: "OUTPUT_ONLY",
- 4: "INPUT_ONLY",
- 5: "IMMUTABLE",
-}
+// Enum value maps for FieldBehavior.
+var (
+ FieldBehavior_name = map[int32]string{
+ 0: "FIELD_BEHAVIOR_UNSPECIFIED",
+ 1: "OPTIONAL",
+ 2: "REQUIRED",
+ 3: "OUTPUT_ONLY",
+ 4: "INPUT_ONLY",
+ 5: "IMMUTABLE",
+ 6: "UNORDERED_LIST",
+ 7: "NON_EMPTY_DEFAULT",
+ }
+ FieldBehavior_value = map[string]int32{
+ "FIELD_BEHAVIOR_UNSPECIFIED": 0,
+ "OPTIONAL": 1,
+ "REQUIRED": 2,
+ "OUTPUT_ONLY": 3,
+ "INPUT_ONLY": 4,
+ "IMMUTABLE": 5,
+ "UNORDERED_LIST": 6,
+ "NON_EMPTY_DEFAULT": 7,
+ }
+)
-var FieldBehavior_value = map[string]int32{
- "FIELD_BEHAVIOR_UNSPECIFIED": 0,
- "OPTIONAL": 1,
- "REQUIRED": 2,
- "OUTPUT_ONLY": 3,
- "INPUT_ONLY": 4,
- "IMMUTABLE": 5,
+func (x FieldBehavior) Enum() *FieldBehavior {
+ p := new(FieldBehavior)
+ *p = x
+ return p
}
func (x FieldBehavior) String() string {
- return proto.EnumName(FieldBehavior_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_field_behavior_proto_enumTypes[0].Descriptor()
+}
+
+func (FieldBehavior) Type() protoreflect.EnumType {
+ return &file_google_api_field_behavior_proto_enumTypes[0]
+}
+
+func (x FieldBehavior) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FieldBehavior.Descriptor instead.
func (FieldBehavior) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_4648f18fd5079967, []int{0}
+ return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0}
}
-var E_FieldBehavior = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: ([]FieldBehavior)(nil),
- Field: 1052,
- Name: "google.api.field_behavior",
- Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior",
- Filename: "google/api/field_behavior.proto",
+var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: ([]FieldBehavior)(nil),
+ Field: 1052,
+ Name: "google.api.field_behavior",
+ Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior",
+ Filename: "google/api/field_behavior.proto",
+ },
}
-func init() {
- proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value)
- proto.RegisterExtension(E_FieldBehavior)
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // A designation of a specific field behavior (required, output only, etc.)
+ // in protobuf messages.
+ //
+ // Examples:
+ //
+ // string name = 1 [(google.api.field_behavior) = REQUIRED];
+ // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+ // google.protobuf.Duration ttl = 1
+ // [(google.api.field_behavior) = INPUT_ONLY];
+ // google.protobuf.Timestamp expire_time = 1
+ // [(google.api.field_behavior) = OUTPUT_ONLY,
+ // (google.api.field_behavior) = IMMUTABLE];
+ //
+ // repeated google.api.FieldBehavior field_behavior = 1052;
+ E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0]
+)
+
+var File_google_api_field_behavior_proto protoreflect.FileDescriptor
+
+var file_google_api_field_behavior_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
+ 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
+ 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
+ 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
+ 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d,
+ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
+ 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
+ 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
-func init() {
- proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_4648f18fd5079967)
+var (
+ file_google_api_field_behavior_proto_rawDescOnce sync.Once
+ file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc
+)
+
+func file_google_api_field_behavior_proto_rawDescGZIP() []byte {
+ file_google_api_field_behavior_proto_rawDescOnce.Do(func() {
+ file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData)
+ })
+ return file_google_api_field_behavior_proto_rawDescData
}
-var fileDescriptor_4648f18fd5079967 = []byte{
- // 303 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4f, 0xb3, 0x30,
- 0x1c, 0xc7, 0x9f, 0xfd, 0x79, 0xcc, 0xac, 0x0e, 0x49, 0x4f, 0xba, 0x44, 0xdd, 0xd1, 0x78, 0x28,
- 0x89, 0xde, 0xf4, 0x04, 0xae, 0xd3, 0x26, 0x8c, 0x56, 0x04, 0x13, 0xbd, 0x60, 0xb7, 0xb1, 0xda,
- 0x64, 0xd2, 0x06, 0xd0, 0x8b, 0x6f, 0xc5, 0x93, 0xaf, 0xd4, 0xd0, 0x31, 0x85, 0x5b, 0xbf, 0xf9,
- 0x7d, 0xfa, 0xeb, 0xe7, 0x5b, 0x70, 0x2a, 0x94, 0x12, 0xeb, 0xd4, 0xe1, 0x5a, 0x3a, 0x2b, 0x99,
- 0xae, 0x97, 0xc9, 0x3c, 0x7d, 0xe5, 0x1f, 0x52, 0xe5, 0x48, 0xe7, 0xaa, 0x54, 0x10, 0x6c, 0x00,
- 0xc4, 0xb5, 0x1c, 0x8d, 0x6b, 0xd8, 0x4c, 0xe6, 0xef, 0x2b, 0x67, 0x99, 0x16, 0x8b, 0x5c, 0xea,
- 0x72, 0x4b, 0x9f, 0x7f, 0x82, 0xe1, 0xb4, 0xda, 0xe2, 0xd5, 0x4b, 0xe0, 0x09, 0x18, 0x4d, 0x09,
- 0xf6, 0x27, 0x89, 0x87, 0xef, 0xdc, 0x47, 0x42, 0xc3, 0x24, 0x0e, 0x1e, 0x18, 0xbe, 0x21, 0x53,
- 0x82, 0x27, 0xf6, 0x3f, 0xb8, 0x0f, 0x06, 0x94, 0x45, 0x84, 0x06, 0xae, 0x6f, 0x77, 0xaa, 0x14,
- 0xe2, 0xfb, 0x98, 0x84, 0x78, 0x62, 0x77, 0xe1, 0x01, 0xd8, 0xa3, 0x71, 0xc4, 0xe2, 0x28, 0xa1,
- 0x81, 0xff, 0x64, 0xf7, 0xa0, 0x05, 0x00, 0x09, 0x7e, 0x73, 0x1f, 0x0e, 0xc1, 0x2e, 0x99, 0xcd,
- 0xe2, 0xc8, 0xf5, 0x7c, 0x6c, 0xff, 0xbf, 0x7a, 0x01, 0x56, 0xbb, 0x02, 0x3c, 0x46, 0xb5, 0xfd,
- 0xd6, 0x18, 0x19, 0x3b, 0xaa, 0x4b, 0xa9, 0xb2, 0xe2, 0xf0, 0x6b, 0x30, 0xee, 0x9d, 0x59, 0x17,
- 0x47, 0xe8, 0xaf, 0x23, 0x6a, 0xe9, 0x87, 0xc3, 0x55, 0x33, 0x7a, 0x1a, 0x58, 0x0b, 0xf5, 0xd6,
- 0xc0, 0x3d, 0xd8, 0xe2, 0x59, 0xf5, 0x0c, 0xeb, 0x3c, 0xbb, 0x35, 0x21, 0xd4, 0x9a, 0x67, 0x02,
- 0xa9, 0x5c, 0x38, 0x22, 0xcd, 0x8c, 0x84, 0xb3, 0x19, 0x71, 0x2d, 0x0b, 0xf3, 0xe9, 0x3c, 0xcb,
- 0x54, 0xc9, 0x8d, 0xcf, 0x75, 0xe3, 0xfc, 0xdd, 0xed, 0xdf, 0xba, 0x8c, 0xcc, 0x77, 0xcc, 0xa5,
- 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x94, 0x57, 0x94, 0xa8, 0x01, 0x00, 0x00,
+var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_field_behavior_proto_goTypes = []interface{}{
+ (FieldBehavior)(0), // 0: google.api.FieldBehavior
+ (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
+}
+var file_google_api_field_behavior_proto_depIdxs = []int32{
+ 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions
+ 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_api_field_behavior_proto_init() }
+func file_google_api_field_behavior_proto_init() {
+ if File_google_api_field_behavior_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_field_behavior_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_field_behavior_proto_goTypes,
+ DependencyIndexes: file_google_api_field_behavior_proto_depIdxs,
+ EnumInfos: file_google_api_field_behavior_proto_enumTypes,
+ ExtensionInfos: file_google_api_field_behavior_proto_extTypes,
+ }.Build()
+ File_google_api_field_behavior_proto = out.File
+ file_google_api_field_behavior_proto_rawDesc = nil
+ file_google_api_field_behavior_proto_goTypes = nil
+ file_google_api_field_behavior_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
index 6ed6f3c..4f34ab7 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
@@ -1,30 +1,48 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
// source: google/api/http.proto
package annotations
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
+ sync "sync"
- proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// Defines the HTTP configuration for an API service. It contains a list of
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
// to one or more HTTP REST API methods.
type Http struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// A list of HTTP configuration rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
@@ -35,47 +53,51 @@
//
// The default behavior is to not decode RFC 6570 reserved characters in multi
// segment matches.
- FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
}
-func (m *Http) Reset() { *m = Http{} }
-func (m *Http) String() string { return proto.CompactTextString(m) }
-func (*Http) ProtoMessage() {}
+func (x *Http) Reset() {
+ *x = Http{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_http_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http) ProtoMessage() {}
+
+func (x *Http) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_http_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http.ProtoReflect.Descriptor instead.
func (*Http) Descriptor() ([]byte, []int) {
- return fileDescriptor_ff9994be407cdcc9, []int{0}
+ return file_google_api_http_proto_rawDescGZIP(), []int{0}
}
-func (m *Http) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Http.Unmarshal(m, b)
-}
-func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Http.Marshal(b, m, deterministic)
-}
-func (m *Http) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Http.Merge(m, src)
-}
-func (m *Http) XXX_Size() int {
- return xxx_messageInfo_Http.Size(m)
-}
-func (m *Http) XXX_DiscardUnknown() {
- xxx_messageInfo_Http.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Http proto.InternalMessageInfo
-
-func (m *Http) GetRules() []*HttpRule {
- if m != nil {
- return m.Rules
+func (x *Http) GetRules() []*HttpRule {
+ if x != nil {
+ return x.Rules
}
return nil
}
-func (m *Http) GetFullyDecodeReservedExpansion() bool {
- if m != nil {
- return m.FullyDecodeReservedExpansion
+func (x *Http) GetFullyDecodeReservedExpansion() bool {
+ if x != nil {
+ return x.FullyDecodeReservedExpansion
}
return false
}
@@ -350,6 +372,10 @@
// the request or response body to a repeated field. However, some gRPC
// Transcoding implementations may not support this feature.
type HttpRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Selects a method to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
@@ -358,7 +384,7 @@
// used with any of the {get|put|post|delete|patch} methods. A custom method
// can be defined using the 'custom' field.
//
- // Types that are valid to be assigned to Pattern:
+ // Types that are assignable to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
@@ -383,69 +409,153 @@
// Additional HTTP bindings for the selector. Nested bindings must
// not contain an `additional_bindings` field themselves (that is,
// the nesting may only be one level deep).
- AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
}
-func (m *HttpRule) Reset() { *m = HttpRule{} }
-func (m *HttpRule) String() string { return proto.CompactTextString(m) }
-func (*HttpRule) ProtoMessage() {}
+func (x *HttpRule) Reset() {
+ *x = HttpRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_http_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpRule) ProtoMessage() {}
+
+func (x *HttpRule) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_http_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead.
func (*HttpRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_ff9994be407cdcc9, []int{1}
+ return file_google_api_http_proto_rawDescGZIP(), []int{1}
}
-func (m *HttpRule) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HttpRule.Unmarshal(m, b)
-}
-func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
-}
-func (m *HttpRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HttpRule.Merge(m, src)
-}
-func (m *HttpRule) XXX_Size() int {
- return xxx_messageInfo_HttpRule.Size(m)
-}
-func (m *HttpRule) XXX_DiscardUnknown() {
- xxx_messageInfo_HttpRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HttpRule proto.InternalMessageInfo
-
-func (m *HttpRule) GetSelector() string {
- if m != nil {
- return m.Selector
+func (x *HttpRule) GetSelector() string {
+ if x != nil {
+ return x.Selector
}
return ""
}
+func (m *HttpRule) GetPattern() isHttpRule_Pattern {
+ if m != nil {
+ return m.Pattern
+ }
+ return nil
+}
+
+func (x *HttpRule) GetGet() string {
+ if x, ok := x.GetPattern().(*HttpRule_Get); ok {
+ return x.Get
+ }
+ return ""
+}
+
+func (x *HttpRule) GetPut() string {
+ if x, ok := x.GetPattern().(*HttpRule_Put); ok {
+ return x.Put
+ }
+ return ""
+}
+
+func (x *HttpRule) GetPost() string {
+ if x, ok := x.GetPattern().(*HttpRule_Post); ok {
+ return x.Post
+ }
+ return ""
+}
+
+func (x *HttpRule) GetDelete() string {
+ if x, ok := x.GetPattern().(*HttpRule_Delete); ok {
+ return x.Delete
+ }
+ return ""
+}
+
+func (x *HttpRule) GetPatch() string {
+ if x, ok := x.GetPattern().(*HttpRule_Patch); ok {
+ return x.Patch
+ }
+ return ""
+}
+
+func (x *HttpRule) GetCustom() *CustomHttpPattern {
+ if x, ok := x.GetPattern().(*HttpRule_Custom); ok {
+ return x.Custom
+ }
+ return nil
+}
+
+func (x *HttpRule) GetBody() string {
+ if x != nil {
+ return x.Body
+ }
+ return ""
+}
+
+func (x *HttpRule) GetResponseBody() string {
+ if x != nil {
+ return x.ResponseBody
+ }
+ return ""
+}
+
+func (x *HttpRule) GetAdditionalBindings() []*HttpRule {
+ if x != nil {
+ return x.AdditionalBindings
+ }
+ return nil
+}
+
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
+ // Maps to HTTP GET. Used for listing and getting information about
+ // resources.
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
}
type HttpRule_Put struct {
+ // Maps to HTTP PUT. Used for replacing a resource.
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
}
type HttpRule_Post struct {
+ // Maps to HTTP POST. Used for creating a resource or performing an action.
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
}
type HttpRule_Delete struct {
+ // Maps to HTTP DELETE. Used for deleting a resource.
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
}
type HttpRule_Patch struct {
+ // Maps to HTTP PATCH. Used for updating a resource.
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
}
type HttpRule_Custom struct {
+ // The custom pattern is used for specifying an HTTP method that is not
+ // included in the `pattern` field, such as HEAD, or "*" to leave the
+ // HTTP method unspecified for this rule. The wild-card rule is useful
+ // for services that provide content to Web (HTML) clients.
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
}
@@ -461,79 +571,185 @@
func (*HttpRule_Custom) isHttpRule_Pattern() {}
-func (m *HttpRule) GetPattern() isHttpRule_Pattern {
- if m != nil {
- return m.Pattern
- }
- return nil
+// A custom pattern is used for defining custom HTTP verb.
+type CustomHttpPattern struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of this custom HTTP verb.
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+ // The path matched by this custom verb.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
}
-func (m *HttpRule) GetGet() string {
- if x, ok := m.GetPattern().(*HttpRule_Get); ok {
- return x.Get
+func (x *CustomHttpPattern) Reset() {
+ *x = CustomHttpPattern{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_http_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CustomHttpPattern) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomHttpPattern) ProtoMessage() {}
+
+func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_http_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead.
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
+ return file_google_api_http_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CustomHttpPattern) GetKind() string {
+ if x != nil {
+ return x.Kind
}
return ""
}
-func (m *HttpRule) GetPut() string {
- if x, ok := m.GetPattern().(*HttpRule_Put); ok {
- return x.Put
+func (x *CustomHttpPattern) GetPath() string {
+ if x != nil {
+ return x.Path
}
return ""
}
-func (m *HttpRule) GetPost() string {
- if x, ok := m.GetPattern().(*HttpRule_Post); ok {
- return x.Post
- }
- return ""
+var File_google_api_http_proto protoreflect.FileDescriptor
+
+var file_google_api_http_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+ 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72,
+ 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65,
+ 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79,
+ 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda,
+ 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70,
+ 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12,
+ 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
+ 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50,
+ 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
+ 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61,
+ 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04,
+ 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (m *HttpRule) GetDelete() string {
- if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
- return x.Delete
- }
- return ""
+var (
+ file_google_api_http_proto_rawDescOnce sync.Once
+ file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc
+)
+
+func file_google_api_http_proto_rawDescGZIP() []byte {
+ file_google_api_http_proto_rawDescOnce.Do(func() {
+ file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData)
+ })
+ return file_google_api_http_proto_rawDescData
}
-func (m *HttpRule) GetPatch() string {
- if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
- return x.Patch
- }
- return ""
+var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_google_api_http_proto_goTypes = []interface{}{
+ (*Http)(nil), // 0: google.api.Http
+ (*HttpRule)(nil), // 1: google.api.HttpRule
+ (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern
+}
+var file_google_api_http_proto_depIdxs = []int32{
+ 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule
+ 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern
+ 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
}
-func (m *HttpRule) GetCustom() *CustomHttpPattern {
- if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
- return x.Custom
+func init() { file_google_api_http_proto_init() }
+func file_google_api_http_proto_init() {
+ if File_google_api_http_proto != nil {
+ return
}
- return nil
-}
-
-func (m *HttpRule) GetBody() string {
- if m != nil {
- return m.Body
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CustomHttpPattern); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
- return ""
-}
-
-func (m *HttpRule) GetResponseBody() string {
- if m != nil {
- return m.ResponseBody
- }
- return ""
-}
-
-func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
- if m != nil {
- return m.AdditionalBindings
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*HttpRule) XXX_OneofWrappers() []interface{} {
- return []interface{}{
+ file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
@@ -541,95 +757,22 @@
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
-}
-
-// A custom pattern is used for defining custom HTTP verb.
-type CustomHttpPattern struct {
- // The name of this custom HTTP verb.
- Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
- // The path matched by this custom verb.
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
-func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
-func (*CustomHttpPattern) ProtoMessage() {}
-func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
- return fileDescriptor_ff9994be407cdcc9, []int{2}
-}
-
-func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
-}
-func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
-}
-func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CustomHttpPattern.Merge(m, src)
-}
-func (m *CustomHttpPattern) XXX_Size() int {
- return xxx_messageInfo_CustomHttpPattern.Size(m)
-}
-func (m *CustomHttpPattern) XXX_DiscardUnknown() {
- xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
-
-func (m *CustomHttpPattern) GetKind() string {
- if m != nil {
- return m.Kind
- }
- return ""
-}
-
-func (m *CustomHttpPattern) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*Http)(nil), "google.api.Http")
- proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
- proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
-}
-
-func init() {
- proto.RegisterFile("google/api/http.proto", fileDescriptor_ff9994be407cdcc9)
-}
-
-var fileDescriptor_ff9994be407cdcc9 = []byte{
- // 419 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30,
- 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52,
- 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37,
- 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d,
- 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b,
- 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e,
- 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e,
- 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea,
- 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc,
- 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55,
- 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1,
- 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6,
- 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52,
- 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef,
- 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55,
- 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42,
- 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22,
- 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a,
- 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65,
- 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b,
- 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63,
- 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec,
- 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea,
- 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18,
- 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd,
- 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac,
- 0x02, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_http_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_http_proto_goTypes,
+ DependencyIndexes: file_google_api_http_proto_depIdxs,
+ MessageInfos: file_google_api_http_proto_msgTypes,
+ }.Build()
+ File_google_api_http_proto = out.File
+ file_google_api_http_proto_rawDesc = nil
+ file_google_api_http_proto_goTypes = nil
+ file_google_api_http_proto_depIdxs = nil
}
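
The regenerated http.pb.go keeps publishing the `google.api.http` method option as `E_Http`, now backed by a protoimpl.ExtensionInfo instead of a proto.ExtensionDesc. A minimal sketch of how a consumer could read the annotation through the new google.golang.org/protobuf API follows; the method descriptor `md`, the helper name, and the wrapper package are assumptions for illustration, not part of this change:

// Sketch: reading the google.api.http annotation from a method descriptor.
// The descriptor md is assumed to be obtained elsewhere (e.g. from a
// registered service descriptor); only the extension lookup is shown.
package httpruleinspect

import (
	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// httpRuleFor returns the HttpRule attached to a method, or nil when the
// method carries no google.api.http annotation.
func httpRuleFor(md protoreflect.MethodDescriptor) *annotations.HttpRule {
	opts, ok := md.Options().(*descriptorpb.MethodOptions)
	if !ok || opts == nil || !proto.HasExtension(opts, annotations.E_Http) {
		return nil
	}
	// proto.GetExtension accepts the ExtensionInfo-based E_Http directly.
	rule, _ := proto.GetExtension(opts, annotations.E_Http).(*annotations.HttpRule)
	return rule
}
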
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
index 1704870..6515668 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
@@ -1,26 +1,40 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
// source: google/api/resource.proto
package annotations
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
+ sync "sync"
- proto "github.com/golang/protobuf/proto"
- descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// A description of the historical or future-looking state of the
// resource pattern.
@@ -38,24 +52,101 @@
ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2
)
-var ResourceDescriptor_History_name = map[int32]string{
- 0: "HISTORY_UNSPECIFIED",
- 1: "ORIGINALLY_SINGLE_PATTERN",
- 2: "FUTURE_MULTI_PATTERN",
-}
+// Enum value maps for ResourceDescriptor_History.
+var (
+ ResourceDescriptor_History_name = map[int32]string{
+ 0: "HISTORY_UNSPECIFIED",
+ 1: "ORIGINALLY_SINGLE_PATTERN",
+ 2: "FUTURE_MULTI_PATTERN",
+ }
+ ResourceDescriptor_History_value = map[string]int32{
+ "HISTORY_UNSPECIFIED": 0,
+ "ORIGINALLY_SINGLE_PATTERN": 1,
+ "FUTURE_MULTI_PATTERN": 2,
+ }
+)
-var ResourceDescriptor_History_value = map[string]int32{
- "HISTORY_UNSPECIFIED": 0,
- "ORIGINALLY_SINGLE_PATTERN": 1,
- "FUTURE_MULTI_PATTERN": 2,
+func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History {
+ p := new(ResourceDescriptor_History)
+ *p = x
+ return p
}
func (x ResourceDescriptor_History) String() string {
- return proto.EnumName(ResourceDescriptor_History_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_resource_proto_enumTypes[0].Descriptor()
+}
+
+func (ResourceDescriptor_History) Type() protoreflect.EnumType {
+ return &file_google_api_resource_proto_enumTypes[0]
+}
+
+func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ResourceDescriptor_History.Descriptor instead.
func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_465e9122405d1bb5, []int{0, 0}
+ return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// A flag representing a specific style that a resource claims to conform to.
+type ResourceDescriptor_Style int32
+
+const (
+ // The unspecified value. Do not use.
+ ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0
+ // This resource is intended to be "declarative-friendly".
+ //
+ // Declarative-friendly resources must be more strictly consistent, and
+ // setting this to true communicates to tools that this resource should
+ // adhere to declarative-friendly expectations.
+ //
+ // Note: This is used by the API linter (linter.aip.dev) to enable
+ // additional checks.
+ ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1
+)
+
+// Enum value maps for ResourceDescriptor_Style.
+var (
+ ResourceDescriptor_Style_name = map[int32]string{
+ 0: "STYLE_UNSPECIFIED",
+ 1: "DECLARATIVE_FRIENDLY",
+ }
+ ResourceDescriptor_Style_value = map[string]int32{
+ "STYLE_UNSPECIFIED": 0,
+ "DECLARATIVE_FRIENDLY": 1,
+ }
+)
+
+func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style {
+ p := new(ResourceDescriptor_Style)
+ *p = x
+ return p
+}
+
+func (x ResourceDescriptor_Style) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_resource_proto_enumTypes[1].Descriptor()
+}
+
+func (ResourceDescriptor_Style) Type() protoreflect.EnumType {
+ return &file_google_api_resource_proto_enumTypes[1]
+}
+
+func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ResourceDescriptor_Style.Descriptor instead.
+func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1}
}
// A simple descriptor of a resource type.
@@ -72,11 +163,7 @@
// // For Kubernetes resources, the format is {api group}/{kind}.
// option (google.api.resource) = {
// type: "pubsub.googleapis.com/Topic"
-// name_descriptor: {
-// pattern: "projects/{project}/topics/{topic}"
-// parent_type: "cloudresourcemanager.googleapis.com/Project"
-// parent_name_extractor: "projects/{project}"
-// }
+// pattern: "projects/{project}/topics/{topic}"
// };
// }
//
@@ -84,10 +171,7 @@
//
// resources:
// - type: "pubsub.googleapis.com/Topic"
-// name_descriptor:
-// - pattern: "projects/{project}/topics/{topic}"
-// parent_type: "cloudresourcemanager.googleapis.com/Project"
-// parent_name_extractor: "projects/{project}"
+// pattern: "projects/{project}/topics/{topic}"
//
// Sometimes, resources have multiple patterns, typically because they can
// live under multiple parents.
@@ -97,26 +181,10 @@
// message LogEntry {
// option (google.api.resource) = {
// type: "logging.googleapis.com/LogEntry"
-// name_descriptor: {
-// pattern: "projects/{project}/logs/{log}"
-// parent_type: "cloudresourcemanager.googleapis.com/Project"
-// parent_name_extractor: "projects/{project}"
-// }
-// name_descriptor: {
-// pattern: "folders/{folder}/logs/{log}"
-// parent_type: "cloudresourcemanager.googleapis.com/Folder"
-// parent_name_extractor: "folders/{folder}"
-// }
-// name_descriptor: {
-// pattern: "organizations/{organization}/logs/{log}"
-// parent_type: "cloudresourcemanager.googleapis.com/Organization"
-// parent_name_extractor: "organizations/{organization}"
-// }
-// name_descriptor: {
-// pattern: "billingAccounts/{billing_account}/logs/{log}"
-// parent_type: "billing.googleapis.com/BillingAccount"
-// parent_name_extractor: "billingAccounts/{billing_account}"
-// }
+// pattern: "projects/{project}/logs/{log}"
+// pattern: "folders/{folder}/logs/{log}"
+// pattern: "organizations/{organization}/logs/{log}"
+// pattern: "billingAccounts/{billing_account}/logs/{log}"
// };
// }
//
@@ -124,49 +192,15 @@
//
// resources:
// - type: 'logging.googleapis.com/LogEntry'
-// name_descriptor:
-// - pattern: "projects/{project}/logs/{log}"
-// parent_type: "cloudresourcemanager.googleapis.com/Project"
-// parent_name_extractor: "projects/{project}"
-// - pattern: "folders/{folder}/logs/{log}"
-// parent_type: "cloudresourcemanager.googleapis.com/Folder"
-// parent_name_extractor: "folders/{folder}"
-// - pattern: "organizations/{organization}/logs/{log}"
-// parent_type: "cloudresourcemanager.googleapis.com/Organization"
-// parent_name_extractor: "organizations/{organization}"
-// - pattern: "billingAccounts/{billing_account}/logs/{log}"
-// parent_type: "billing.googleapis.com/BillingAccount"
-// parent_name_extractor: "billingAccounts/{billing_account}"
-//
-// For flexible resources, the resource name doesn't contain parent names, but
-// the resource itself has parents for policy evaluation.
-//
-// Example:
-//
-// message Shelf {
-// option (google.api.resource) = {
-// type: "library.googleapis.com/Shelf"
-// name_descriptor: {
-// pattern: "shelves/{shelf}"
-// parent_type: "cloudresourcemanager.googleapis.com/Project"
-// }
-// name_descriptor: {
-// pattern: "shelves/{shelf}"
-// parent_type: "cloudresourcemanager.googleapis.com/Folder"
-// }
-// };
-// }
-//
-// The ResourceDescriptor Yaml config will look like:
-//
-// resources:
-// - type: 'library.googleapis.com/Shelf'
-// name_descriptor:
-// - pattern: "shelves/{shelf}"
-// parent_type: "cloudresourcemanager.googleapis.com/Project"
-// - pattern: "shelves/{shelf}"
-// parent_type: "cloudresourcemanager.googleapis.com/Folder"
+// pattern: "projects/{project}/logs/{log}"
+// pattern: "folders/{folder}/logs/{log}"
+// pattern: "organizations/{organization}/logs/{log}"
+// pattern: "billingAccounts/{billing_account}/logs/{log}"
type ResourceDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The resource type. It must be in the format of
// {service_name}/{resource_type_kind}. The `resource_type_kind` must be
// singular and must not include version numbers.
@@ -217,90 +251,113 @@
// };
// }
History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
- // The plural name used in the resource name, such as 'projects' for
- // the name of 'projects/{project}'. It is the same concept of the `plural`
- // field in k8s CRD spec
+ // The plural name used in the resource name and permission names, such as
+ // 'projects' for the resource name of 'projects/{project}' and the permission
+ // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
+ // concept of the `plural` field in k8s CRD spec
// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+ //
+ // Note: The plural form is required even for singleton resources. See
+ // https://aip.dev/156
Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"`
// The same concept of the `singular` field in k8s CRD spec
// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
- Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"`
+ // Style flag(s) for this resource.
+ // These indicate that a resource is expected to conform to a given
+ // style. See the specific style flags for additional information.
+ Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"`
}
-func (m *ResourceDescriptor) Reset() { *m = ResourceDescriptor{} }
-func (m *ResourceDescriptor) String() string { return proto.CompactTextString(m) }
-func (*ResourceDescriptor) ProtoMessage() {}
+func (x *ResourceDescriptor) Reset() {
+ *x = ResourceDescriptor{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceDescriptor) ProtoMessage() {}
+
+func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead.
func (*ResourceDescriptor) Descriptor() ([]byte, []int) {
- return fileDescriptor_465e9122405d1bb5, []int{0}
+ return file_google_api_resource_proto_rawDescGZIP(), []int{0}
}
-func (m *ResourceDescriptor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResourceDescriptor.Unmarshal(m, b)
-}
-func (m *ResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResourceDescriptor.Marshal(b, m, deterministic)
-}
-func (m *ResourceDescriptor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceDescriptor.Merge(m, src)
-}
-func (m *ResourceDescriptor) XXX_Size() int {
- return xxx_messageInfo_ResourceDescriptor.Size(m)
-}
-func (m *ResourceDescriptor) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceDescriptor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceDescriptor proto.InternalMessageInfo
-
-func (m *ResourceDescriptor) GetType() string {
- if m != nil {
- return m.Type
+func (x *ResourceDescriptor) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *ResourceDescriptor) GetPattern() []string {
- if m != nil {
- return m.Pattern
+func (x *ResourceDescriptor) GetPattern() []string {
+ if x != nil {
+ return x.Pattern
}
return nil
}
-func (m *ResourceDescriptor) GetNameField() string {
- if m != nil {
- return m.NameField
+func (x *ResourceDescriptor) GetNameField() string {
+ if x != nil {
+ return x.NameField
}
return ""
}
-func (m *ResourceDescriptor) GetHistory() ResourceDescriptor_History {
- if m != nil {
- return m.History
+func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History {
+ if x != nil {
+ return x.History
}
return ResourceDescriptor_HISTORY_UNSPECIFIED
}
-func (m *ResourceDescriptor) GetPlural() string {
- if m != nil {
- return m.Plural
+func (x *ResourceDescriptor) GetPlural() string {
+ if x != nil {
+ return x.Plural
}
return ""
}
-func (m *ResourceDescriptor) GetSingular() string {
- if m != nil {
- return m.Singular
+func (x *ResourceDescriptor) GetSingular() string {
+ if x != nil {
+ return x.Singular
}
return ""
}
+func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style {
+ if x != nil {
+ return x.Style
+ }
+ return nil
+}
+
// Defines a proto annotation that describes a string field that refers to
// an API resource.
type ResourceReference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The resource type that the annotated field references.
//
// Example:
@@ -310,6 +367,17 @@
// type: "pubsub.googleapis.com/Topic"
// }];
// }
+ //
+ // Occasionally, a field may reference an arbitrary resource. In this case,
+ // APIs use the special value * in their resource reference.
+ //
+ // Example:
+ //
+ // message GetIamPolicyRequest {
+ // string resource = 2 [(google.api.resource_reference) = {
+ // type: "*"
+ // }];
+ // }
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// The resource type of a child collection that the annotated field
// references. This is useful for annotating the `parent` field that
@@ -322,122 +390,266 @@
// child_type: "logging.googleapis.com/LogEntry"
// };
// }
- ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"`
}
-func (m *ResourceReference) Reset() { *m = ResourceReference{} }
-func (m *ResourceReference) String() string { return proto.CompactTextString(m) }
-func (*ResourceReference) ProtoMessage() {}
+func (x *ResourceReference) Reset() {
+ *x = ResourceReference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_resource_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceReference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceReference) ProtoMessage() {}
+
+func (x *ResourceReference) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_resource_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead.
func (*ResourceReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_465e9122405d1bb5, []int{1}
+ return file_google_api_resource_proto_rawDescGZIP(), []int{1}
}
-func (m *ResourceReference) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResourceReference.Unmarshal(m, b)
-}
-func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic)
-}
-func (m *ResourceReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceReference.Merge(m, src)
-}
-func (m *ResourceReference) XXX_Size() int {
- return xxx_messageInfo_ResourceReference.Size(m)
-}
-func (m *ResourceReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceReference proto.InternalMessageInfo
-
-func (m *ResourceReference) GetType() string {
- if m != nil {
- return m.Type
+func (x *ResourceReference) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *ResourceReference) GetChildType() string {
- if m != nil {
- return m.ChildType
+func (x *ResourceReference) GetChildType() string {
+ if x != nil {
+ return x.ChildType
}
return ""
}
-var E_ResourceReference = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*ResourceReference)(nil),
- Field: 1055,
- Name: "google.api.resource_reference",
- Tag: "bytes,1055,opt,name=resource_reference",
- Filename: "google/api/resource.proto",
+var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*ResourceReference)(nil),
+ Field: 1055,
+ Name: "google.api.resource_reference",
+ Tag: "bytes,1055,opt,name=resource_reference",
+ Filename: "google/api/resource.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: ([]*ResourceDescriptor)(nil),
+ Field: 1053,
+ Name: "google.api.resource_definition",
+ Tag: "bytes,1053,rep,name=resource_definition",
+ Filename: "google/api/resource.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*ResourceDescriptor)(nil),
+ Field: 1053,
+ Name: "google.api.resource",
+ Tag: "bytes,1053,opt,name=resource",
+ Filename: "google/api/resource.proto",
+ },
}
-var E_ResourceDefinition = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: ([]*ResourceDescriptor)(nil),
- Field: 1053,
- Name: "google.api.resource_definition",
- Tag: "bytes,1053,rep,name=resource_definition",
- Filename: "google/api/resource.proto",
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // An annotation that describes a resource reference, see
+ // [ResourceReference][].
+ //
+ // optional google.api.ResourceReference resource_reference = 1055;
+ E_ResourceReference = &file_google_api_resource_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // An annotation that describes a resource definition without a corresponding
+ // message; see [ResourceDescriptor][].
+ //
+ // repeated google.api.ResourceDescriptor resource_definition = 1053;
+ E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // An annotation that describes a resource definition, see
+ // [ResourceDescriptor][].
+ //
+ // optional google.api.ResourceDescriptor resource = 1053;
+ E_Resource = &file_google_api_resource_proto_extTypes[2]
+)
+
+var File_google_api_resource_proto protoreflect.FileDescriptor
+
+var file_google_api_resource_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a,
+ 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12,
+ 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75,
+ 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75,
+ 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03,
+ 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22,
+ 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49,
+ 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c,
+ 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e,
+ 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c,
+ 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05,
+ 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14,
+ 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45,
+ 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c,
+ 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72,
+ 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
+ 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
-var E_Resource = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*ResourceDescriptor)(nil),
- Field: 1053,
- Name: "google.api.resource",
- Tag: "bytes,1053,opt,name=resource",
- Filename: "google/api/resource.proto",
+var (
+ file_google_api_resource_proto_rawDescOnce sync.Once
+ file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc
+)
+
+func file_google_api_resource_proto_rawDescGZIP() []byte {
+ file_google_api_resource_proto_rawDescOnce.Do(func() {
+ file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData)
+ })
+ return file_google_api_resource_proto_rawDescData
}
-func init() {
- proto.RegisterEnum("google.api.ResourceDescriptor_History", ResourceDescriptor_History_name, ResourceDescriptor_History_value)
- proto.RegisterType((*ResourceDescriptor)(nil), "google.api.ResourceDescriptor")
- proto.RegisterType((*ResourceReference)(nil), "google.api.ResourceReference")
- proto.RegisterExtension(E_ResourceReference)
- proto.RegisterExtension(E_ResourceDefinition)
- proto.RegisterExtension(E_Resource)
+var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_resource_proto_goTypes = []interface{}{
+ (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History
+ (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style
+ (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor
+ (*ResourceReference)(nil), // 3: google.api.ResourceReference
+ (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions
+ (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions
+ (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions
+}
+var file_google_api_resource_proto_depIdxs = []int32{
+ 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History
+ 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style
+ 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions
+ 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions
+ 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions
+ 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference
+ 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor
+ 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 5, // [5:8] is the sub-list for extension type_name
+ 2, // [2:5] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
}
-func init() {
- proto.RegisterFile("google/api/resource.proto", fileDescriptor_465e9122405d1bb5)
-}
-
-var fileDescriptor_465e9122405d1bb5 = []byte{
- // 490 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c,
- 0x14, 0xfd, 0x9c, 0xe4, 0xcb, 0xcf, 0xad, 0xa8, 0xda, 0x29, 0x02, 0xb7, 0x22, 0x60, 0x65, 0x81,
- 0xb2, 0xb2, 0xa5, 0xb0, 0x0b, 0x1b, 0x52, 0xe2, 0xa4, 0x96, 0xd2, 0xc4, 0x9a, 0x38, 0x8b, 0x02,
- 0x92, 0x35, 0x75, 0x26, 0xee, 0x48, 0xee, 0xcc, 0x68, 0xec, 0x2c, 0xf2, 0x30, 0x08, 0x89, 0x67,
- 0xe0, 0xe1, 0x58, 0xa2, 0x8c, 0x7f, 0x88, 0x68, 0x84, 0xd8, 0xcd, 0xbd, 0xe7, 0xde, 0x73, 0x8e,
- 0xcf, 0x95, 0xe1, 0x32, 0x16, 0x22, 0x4e, 0xa8, 0x43, 0x24, 0x73, 0x14, 0x4d, 0xc5, 0x56, 0x45,
- 0xd4, 0x96, 0x4a, 0x64, 0x02, 0x41, 0x0e, 0xd9, 0x44, 0xb2, 0x2b, 0xab, 0x18, 0xd3, 0xc8, 0xfd,
- 0x76, 0xe3, 0xac, 0x69, 0x1a, 0x29, 0x26, 0x33, 0xa1, 0xf2, 0xe9, 0xde, 0x8f, 0x1a, 0x20, 0x5c,
- 0x10, 0x8c, 0x2b, 0x10, 0x21, 0x68, 0x64, 0x3b, 0x49, 0x4d, 0xc3, 0x32, 0xfa, 0x1d, 0xac, 0xdf,
- 0xc8, 0x84, 0x96, 0x24, 0x59, 0x46, 0x15, 0x37, 0x6b, 0x56, 0xbd, 0xdf, 0xc1, 0x65, 0x89, 0xba,
- 0x00, 0x9c, 0x3c, 0xd2, 0x70, 0xc3, 0x68, 0xb2, 0x36, 0xeb, 0x7a, 0xa7, 0xb3, 0xef, 0x4c, 0xf6,
- 0x0d, 0xf4, 0x01, 0x5a, 0x0f, 0x2c, 0xcd, 0x84, 0xda, 0x99, 0x0d, 0xcb, 0xe8, 0x9f, 0x0e, 0xde,
- 0xda, 0xbf, 0x3d, 0xda, 0x4f, 0xd5, 0xed, 0x9b, 0x7c, 0x1a, 0x97, 0x6b, 0xe8, 0x05, 0x34, 0x65,
- 0xb2, 0x55, 0x24, 0x31, 0xff, 0xd7, 0xe4, 0x45, 0x85, 0xae, 0xa0, 0x9d, 0x32, 0x1e, 0x6f, 0x13,
- 0xa2, 0xcc, 0xa6, 0x46, 0xaa, 0xba, 0xf7, 0x19, 0x5a, 0x05, 0x0f, 0x7a, 0x09, 0x17, 0x37, 0xde,
- 0x32, 0x58, 0xe0, 0xbb, 0x70, 0x35, 0x5f, 0xfa, 0xee, 0x47, 0x6f, 0xe2, 0xb9, 0xe3, 0xb3, 0xff,
- 0x50, 0x17, 0x2e, 0x17, 0xd8, 0x9b, 0x7a, 0xf3, 0xd1, 0x6c, 0x76, 0x17, 0x2e, 0xbd, 0xf9, 0x74,
- 0xe6, 0x86, 0xfe, 0x28, 0x08, 0x5c, 0x3c, 0x3f, 0x33, 0x90, 0x09, 0xcf, 0x27, 0xab, 0x60, 0x85,
- 0xdd, 0xf0, 0x76, 0x35, 0x0b, 0xbc, 0x0a, 0xa9, 0xf5, 0x26, 0x70, 0x5e, 0xfa, 0xc6, 0x74, 0x43,
- 0x15, 0xe5, 0x11, 0x3d, 0x1a, 0x5a, 0x17, 0x20, 0x7a, 0x60, 0xc9, 0x3a, 0xd4, 0x48, 0x2d, 0x8f,
- 0x46, 0x77, 0x82, 0x9d, 0xa4, 0xc3, 0x04, 0x50, 0x79, 0xbe, 0x50, 0x55, 0x44, 0xdd, 0x32, 0x9f,
- 0xf2, 0x6e, 0xb6, 0x0e, 0x72, 0x21, 0x33, 0x26, 0x78, 0x6a, 0x7e, 0x6b, 0x5b, 0x46, 0xff, 0x64,
- 0xd0, 0x3d, 0x96, 0x62, 0xe5, 0x06, 0x9f, 0xab, 0x3f, 0x5b, 0x43, 0x0e, 0x17, 0x95, 0xda, 0x9a,
- 0x6e, 0x18, 0x67, 0x7b, 0x42, 0xf4, 0xea, 0x88, 0x5c, 0x42, 0x4b, 0xb5, 0xaf, 0x6d, 0xab, 0xde,
- 0x3f, 0x19, 0xbc, 0xfe, 0xfb, 0xcd, 0x70, 0xf5, 0x1d, 0xe3, 0x8a, 0x78, 0xf8, 0x05, 0xda, 0x65,
- 0x17, 0xbd, 0x79, 0x22, 0x72, 0x4b, 0xd3, 0x94, 0xc4, 0x87, 0x3a, 0xc6, 0x3f, 0xe8, 0x54, 0x8c,
- 0xd7, 0x1c, 0x4e, 0x23, 0xf1, 0x78, 0x30, 0x7e, 0xfd, 0xac, 0x9c, 0xf7, 0xf7, 0x1a, 0xbe, 0xf1,
- 0x69, 0x54, 0x80, 0xb1, 0x48, 0x08, 0x8f, 0x6d, 0xa1, 0x62, 0x27, 0xa6, 0x5c, 0x3b, 0x70, 0x72,
- 0x88, 0x48, 0x96, 0xea, 0xbf, 0x88, 0x70, 0x2e, 0x32, 0xa2, 0xad, 0xbc, 0x3f, 0x78, 0xff, 0x34,
- 0x8c, 0xef, 0xb5, 0xc6, 0x74, 0xe4, 0x7b, 0xf7, 0x4d, 0xbd, 0xf7, 0xee, 0x57, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x75, 0x12, 0x53, 0xef, 0x7c, 0x03, 0x00, 0x00,
+func init() { file_google_api_resource_proto_init() }
+func file_google_api_resource_proto_init() {
+ if File_google_api_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceDescriptor); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceReference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_resource_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 2,
+ NumExtensions: 3,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_resource_proto_goTypes,
+ DependencyIndexes: file_google_api_resource_proto_depIdxs,
+ EnumInfos: file_google_api_resource_proto_enumTypes,
+ MessageInfos: file_google_api_resource_proto_msgTypes,
+ ExtensionInfos: file_google_api_resource_proto_extTypes,
+ }.Build()
+ File_google_api_resource_proto = out.File
+ file_google_api_resource_proto_rawDesc = nil
+ file_google_api_resource_proto_goTypes = nil
+ file_google_api_resource_proto_depIdxs = nil
}
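
resource.pb.go now carries the ResourceDescriptor_Style enum, the repeated `style` field, and the extension infos for `resource`, `resource_reference`, and `resource_definition`. As a hedged illustration (the helper name and the surrounding package are invented here, and the message descriptor is an assumed input), a tool could check a message for the new DECLARATIVE_FRIENDLY style like this:

// Sketch: inspecting google.api.resource annotations with the regenerated types.
// The message descriptor md is assumed to be supplied by the caller.
package resourceinspect

import (
	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// declarativeFriendly reports whether a message is annotated as a
// declarative-friendly resource via the new repeated style field.
func declarativeFriendly(md protoreflect.MessageDescriptor) bool {
	opts, ok := md.Options().(*descriptorpb.MessageOptions)
	if !ok || opts == nil || !proto.HasExtension(opts, annotations.E_Resource) {
		return false
	}
	desc, _ := proto.GetExtension(opts, annotations.E_Resource).(*annotations.ResourceDescriptor)
	if desc == nil {
		return false
	}
	for _, s := range desc.GetStyle() {
		if s == annotations.ResourceDescriptor_DECLARATIVE_FRIENDLY {
			return true
		}
	}
	return false
}
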
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
new file mode 100644
index 0000000..dd45cf6
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
@@ -0,0 +1,693 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
+// source: google/api/routing.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies the routing information that should be sent along with the request
+// in the form of routing header.
+// **NOTE:** All service configuration rules follow the "last one wins" order.
+//
+// The examples below will apply to an RPC which has the following request type:
+//
+// Message Definition:
+//
+// message Request {
+// // The name of the Table
+// // Values can be of the following formats:
+// // - `projects/<project>/tables/<table>`
+// // - `projects/<project>/instances/<instance>/tables/<table>`
+// // - `region/<region>/zones/<zone>/tables/<table>`
+// string table_name = 1;
+//
+// // This value specifies routing for replication.
+// // It can be in the following formats:
+// // - `profiles/<profile_id>`
+// // - a legacy `profile_id` that can be any string
+// string app_profile_id = 2;
+// }
+//
+// Example message:
+//
+// {
+// table_name: projects/proj_foo/instances/instance_bar/table/table_baz,
+// app_profile_id: profiles/prof_qux
+// }
+//
+// The routing header consists of one or multiple key-value pairs. Every key
+// and value must be percent-encoded, and joined together in the format of
+// `key1=value1&key2=value2`.
+// In the examples below I am skipping the percent-encoding for readability.
+//
+// Example 1
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key equal to the field name.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // Take the `app_profile_id`.
+// routing_parameters {
+// field: "app_profile_id"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params: app_profile_id=profiles/prof_qux
+//
+// Example 2
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key different from the field name.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // Take the `app_profile_id`, but name it `routing_id` in the header.
+// routing_parameters {
+// field: "app_profile_id"
+// path_template: "{routing_id=**}"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params: routing_id=profiles/prof_qux
+//
+// Example 3
+//
+// Extracting a field from the request to put into the routing
+// header, while matching a path template syntax on the field's value.
+//
+// NB: it is more useful to send nothing than to send garbage for the purpose
+// of dynamic routing, since garbage pollutes the cache. Thus the matching.
+//
+// Sub-example 3a
+//
+// The field matches the template.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // Take the `table_name`, if it's well-formed (with project-based
+// // syntax).
+// routing_parameters {
+// field: "table_name"
+// path_template: "{table_name=projects/*/instances/*/**}"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params:
+// table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// Sub-example 3b
+//
+// The field does not match the template.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // Take the `table_name`, if it's well-formed (with region-based
+// // syntax).
+// routing_parameters {
+// field: "table_name"
+// path_template: "{table_name=regions/*/zones/*/**}"
+// }
+// };
+//
+// result:
+//
+// <no routing header will be sent>
+//
+// Sub-example 3c
+//
+// Multiple alternative conflictingly named path templates are
+// specified. The one that matches is used to construct the header.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // Take the `table_name`, if it's well-formed, whether
+// // using the region- or projects-based syntax.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "{table_name=regions/*/zones/*/**}"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "{table_name=projects/*/instances/*/**}"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params:
+// table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// Example 4
+//
+// Extracting a single routing header key-value pair by matching a
+// template syntax on (a part of) a single request field.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // Take just the project id from the `table_name` field.
+// routing_parameters {
+// field: "table_name"
+// path_template: "{routing_id=projects/*}/**"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params: routing_id=projects/proj_foo
+//
+// Example 5
+//
+// Extracting a single routing header key-value pair by matching
+// several conflictingly named path templates on (parts of) a single request
+// field. The last template to match "wins" the conflict.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // If the `table_name` does not have instances information,
+// // take just the project id for routing.
+// // Otherwise take project + instance.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "{routing_id=projects/*}/**"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "{routing_id=projects/*/instances/*}/**"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params:
+// routing_id=projects/proj_foo/instances/instance_bar
+//
+// Example 6
+//
+// Extracting multiple routing header key-value pairs by matching
+// several non-conflicting path templates on (parts of) a single request field.
+//
+// Sub-example 6a
+//
+// Make the templates strict, so that if the `table_name` does not
+// have instance information, nothing is sent.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // The routing code needs two keys instead of one composite
+// // but works only for the tables with the "project-instance" name
+// // syntax.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "{project_id=projects/*}/instances/*/**"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "projects/*/{instance_id=instances/*}/**"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params:
+// project_id=projects/proj_foo&instance_id=instances/instance_bar
+//
+// Sub-example 6b
+//
+// Make the templates loose, so that if the `table_name` does not
+// have instance information, just the project id part is sent.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // The routing code wants two keys instead of one composite
+// // but will work with just the `project_id` for tables without
+// // an instance in the `table_name`.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "{project_id=projects/*}/**"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "projects/*/{instance_id=instances/*}/**"
+// }
+// };
+//
+// result (is the same as 6a for our example message because it has the instance
+// information):
+//
+// x-goog-request-params:
+// project_id=projects/proj_foo&instance_id=instances/instance_bar
+//
+// Example 7
+//
+// Extracting multiple routing header key-value pairs by matching
+// several path templates on multiple request fields.
+//
+// NB: note that here there is no way to specify sending nothing if one of the
+// fields does not match its template. E.g. if the `table_name` is in the wrong
+// format, the `project_id` will not be sent, but the `routing_id` will be.
+// The backend routing code has to be aware of that and be prepared to not
+// receive a full complement of keys if it expects multiple.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // The routing needs both `project_id` and `routing_id`
+// // (from the `app_profile_id` field) for routing.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "{project_id=projects/*}/**"
+// }
+// routing_parameters {
+// field: "app_profile_id"
+// path_template: "{routing_id=**}"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params:
+// project_id=projects/proj_foo&routing_id=profiles/prof_qux
+//
+// Example 8
+//
+// Extracting a single routing header key-value pair by matching
+// several conflictingly named path templates on several request fields. The
+// last template to match "wins" the conflict.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // The `routing_id` can be a project id or a region id depending on
+// // the table name format, but only if the `app_profile_id` is not set.
+// // If `app_profile_id` is set it should be used instead.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "{routing_id=projects/*}/**"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "{routing_id=regions/*}/**"
+// }
+// routing_parameters {
+// field: "app_profile_id"
+// path_template: "{routing_id=**}"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params: routing_id=profiles/prof_qux
+//
+// Example 9
+//
+// Bringing it all together.
+//
+// annotation:
+//
+// option (google.api.routing) = {
+// // For routing both `table_location` and a `routing_id` are needed.
+// //
+// // table_location can be either an instance id or a region+zone id.
+// //
+// // For `routing_id`, take the value of `app_profile_id`
+// // - If it's in the format `profiles/<profile_id>`, send
+// // just the `<profile_id>` part.
+// // - If it's any other literal, send it as is.
+// // If the `app_profile_id` is empty, and the `table_name` starts with
+// // the project_id, send that instead.
+//
+// routing_parameters {
+// field: "table_name"
+// path_template: "projects/*/{table_location=instances/*}/tables/*"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "{table_location=regions/*/zones/*}/tables/*"
+// }
+// routing_parameters {
+// field: "table_name"
+// path_template: "{routing_id=projects/*}/**"
+// }
+// routing_parameters {
+// field: "app_profile_id"
+// path_template: "{routing_id=**}"
+// }
+// routing_parameters {
+// field: "app_profile_id"
+// path_template: "profiles/{routing_id=*}"
+// }
+// };
+//
+// result:
+//
+// x-goog-request-params:
+// table_location=instances/instance_bar&routing_id=prof_qux
+type RoutingRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A collection of Routing Parameter specifications.
+ // **NOTE:** If multiple Routing Parameters describe the same key
+ // (via the `path_template` field or via the `field` field when
+ // `path_template` is not provided), the "last one wins" rule
+ // determines which Parameter gets used.
+ // See the examples for more details.
+ RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"`
+}
+
+func (x *RoutingRule) Reset() {
+ *x = RoutingRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_routing_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutingRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutingRule) ProtoMessage() {}
+
+func (x *RoutingRule) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_routing_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead.
+func (*RoutingRule) Descriptor() ([]byte, []int) {
+ return file_google_api_routing_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter {
+ if x != nil {
+ return x.RoutingParameters
+ }
+ return nil
+}
+
+// A projection from an input message to the GRPC or REST header.
+type RoutingParameter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A request field to extract the header key-value pair from.
+ Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+ // A pattern matching the key-value field. Optional.
+ // If not specified, the whole field specified in the `field` field will be
+ // taken as value, and its name used as key. If specified, it MUST contain
+ // exactly one named segment (along with any number of unnamed segments). The
+ // pattern will be matched over the field specified in the `field` field, then
+ // if the match is successful:
+ // - the name of the single named segment will be used as a header name,
+ // - the match value of the segment will be used as a header value;
+ // if the match is NOT successful, nothing will be sent.
+ //
+ // Example:
+ //
+ // -- This is a field in the request message
+ // | that the header value will be extracted from.
+ // |
+ // | -- This is the key name in the
+ // | | routing header.
+ // V |
+ // field: "table_name" v
+ // path_template: "projects/*/{table_location=instances/*}/tables/*"
+ // ^ ^
+ // | |
+ // In the {} brackets is the pattern that -- |
+ // specifies what to extract from the |
+ // field as a value to be sent. |
+ // |
+ // The string in the field must match the whole pattern --
+ // before brackets, inside brackets, after brackets.
+ //
+ // When looking at this specific example, we can see that:
+ // - A key-value pair with the key `table_location`
+ // and the value matching `instances/*` should be added
+ // to the x-goog-request-params routing header.
+ // - The value is extracted from the request message's `table_name` field
+ // if it matches the full pattern specified:
+ // `projects/*/instances/*/tables/*`.
+ //
+ // **NB:** If the `path_template` field is not provided, the key name is
+ // equal to the field name, and the whole field should be sent as a value.
+ // This makes the pattern for the field and the value functionally equivalent
+ // to `**`, and the configuration
+ //
+ // {
+ // field: "table_name"
+ // }
+ //
+ // is a functionally equivalent shorthand to:
+ //
+ // {
+ // field: "table_name"
+ // path_template: "{table_name=**}"
+ // }
+ //
+ // See Example 1 for more details.
+ PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"`
+}
+
+func (x *RoutingParameter) Reset() {
+ *x = RoutingParameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_routing_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutingParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutingParameter) ProtoMessage() {}
+
+func (x *RoutingParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_routing_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead.
+func (*RoutingParameter) Descriptor() ([]byte, []int) {
+ return file_google_api_routing_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RoutingParameter) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *RoutingParameter) GetPathTemplate() string {
+ if x != nil {
+ return x.PathTemplate
+ }
+ return ""
+}
+
+var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*RoutingRule)(nil),
+ Field: 72295729,
+ Name: "google.api.routing",
+ Tag: "bytes,72295729,opt,name=routing",
+ Filename: "google/api/routing.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // See RoutingRule.
+ //
+ // optional google.api.RoutingRule routing = 72295729;
+ E_Routing = &file_google_api_routing_proto_extTypes[0]
+)
+
+var File_google_api_routing_proto protoreflect.FileDescriptor
+
+var file_google_api_routing_proto_rawDesc = []byte{
+ 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75,
+ 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23,
+ 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c,
+ 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1,
+ 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65,
+ 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75,
+ 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67,
+ 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02,
+ 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_routing_proto_rawDescOnce sync.Once
+ file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc
+)
+
+func file_google_api_routing_proto_rawDescGZIP() []byte {
+ file_google_api_routing_proto_rawDescOnce.Do(func() {
+ file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData)
+ })
+ return file_google_api_routing_proto_rawDescData
+}
+
+var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_routing_proto_goTypes = []interface{}{
+ (*RoutingRule)(nil), // 0: google.api.RoutingRule
+ (*RoutingParameter)(nil), // 1: google.api.RoutingParameter
+ (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions
+}
+var file_google_api_routing_proto_depIdxs = []int32{
+ 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter
+ 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions
+ 0, // 2: google.api.routing:type_name -> google.api.RoutingRule
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 2, // [2:3] is the sub-list for extension type_name
+ 1, // [1:2] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_api_routing_proto_init() }
+func file_google_api_routing_proto_init() {
+ if File_google_api_routing_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutingRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutingParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_routing_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_routing_proto_goTypes,
+ DependencyIndexes: file_google_api_routing_proto_depIdxs,
+ MessageInfos: file_google_api_routing_proto_msgTypes,
+ ExtensionInfos: file_google_api_routing_proto_extTypes,
+ }.Build()
+ File_google_api_routing_proto = out.File
+ file_google_api_routing_proto_rawDesc = nil
+ file_google_api_routing_proto_goTypes = nil
+ file_google_api_routing_proto_depIdxs = nil
+}
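The regenerated routing.pb.go above exposes `RoutingRule`, `RoutingParameter`, and the `E_Routing` extension on `google.protobuf.MethodOptions`. A minimal sketch of setting and reading that extension with the new `google.golang.org/protobuf/proto` helpers; the field name and path template are made-up illustrative values, not taken from this change:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Attach a google.api.routing extension to a MethodOptions message.
	opts := &descriptorpb.MethodOptions{}
	proto.SetExtension(opts, annotations.E_Routing, &annotations.RoutingRule{
		RoutingParameters: []*annotations.RoutingParameter{
			{Field: "name", PathTemplate: "{routing_id=projects/*}/**"},
		},
	})

	// Read it back the way a code generator or client library would.
	if proto.HasExtension(opts, annotations.E_Routing) {
		rule := proto.GetExtension(opts, annotations.E_Routing).(*annotations.RoutingRule)
		for _, p := range rule.GetRoutingParameters() {
			fmt.Printf("field=%s template=%s\n", p.GetField(), p.GetPathTemplate())
		}
	}
}
```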
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index c988461..f34a38e 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,26 +1,40 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
// source: google/rpc/status.proto
package status
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
+ sync "sync"
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
@@ -30,6 +44,10 @@
// You can find out more about this error model and how to work with it in the
// [API Design Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
@@ -38,80 +56,146 @@
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
}
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
+func (x *Status) Reset() {
+ *x = Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_24d244abaf643bfe, []int{0}
+ return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Status.Unmarshal(m, b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return xxx_messageInfo_Status.Size(m)
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *Status) GetCode() int32 {
- if m != nil {
- return m.Code
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
}
return 0
}
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
}
return ""
}
-func (m *Status) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
+func (x *Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
}
return nil
}
-func init() {
- proto.RegisterType((*Status)(nil), "google.rpc.Status")
+var File_google_rpc_status_proto protoreflect.FileDescriptor
+
+var file_google_rpc_status_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
+ 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
-func init() {
- proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe)
+var (
+ file_google_rpc_status_proto_rawDescOnce sync.Once
+ file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
+)
+
+func file_google_rpc_status_proto_rawDescGZIP() []byte {
+ file_google_rpc_status_proto_rawDescOnce.Do(func() {
+ file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
+ })
+ return file_google_rpc_status_proto_rawDescData
}
-var fileDescriptor_24d244abaf643bfe = []byte{
- // 212 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
- 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
- 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
- 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
- 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
- 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
- 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
- 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x44, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
- 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
- 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
- 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x7e, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x0e, 0x0a,
- 0x70, 0x4e, 0x62, 0x03, 0x2b, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x28, 0x45, 0xb1,
- 0x13, 0x01, 0x00, 0x00,
+var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_rpc_status_proto_goTypes = []interface{}{
+ (*Status)(nil), // 0: google.rpc.Status
+ (*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_rpc_status_proto_depIdxs = []int32{
+ 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_status_proto_init() }
+func file_google_rpc_status_proto_init() {
+ if File_google_rpc_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_rpc_status_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_rpc_status_proto_goTypes,
+ DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+ MessageInfos: file_google_rpc_status_proto_msgTypes,
+ }.Build()
+ File_google_rpc_status_proto = out.File
+ file_google_rpc_status_proto_rawDesc = nil
+ file_google_rpc_status_proto_goTypes = nil
+ file_google_rpc_status_proto_depIdxs = nil
}
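The regenerated status.pb.go keeps the same wire format but now types details as `anypb.Any`. A small sketch of building a `google.rpc.Status` by hand with one illustrative detail payload; real gRPC code usually produces this via the `google.golang.org/grpc/status` package instead:

```go
package main

import (
	"fmt"

	statuspb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Wrap an arbitrary proto message as a detail; Any records its type URL.
	detail, err := anypb.New(wrapperspb.String("row 42 was not found"))
	if err != nil {
		panic(err)
	}

	st := &statuspb.Status{
		Code:    5, // google.rpc.Code value for NOT_FOUND
		Message: "resource not found",
		Details: []*anypb.Any{detail},
	}

	out, _ := protojson.Marshal(st)
	fmt.Println(string(out))
}
```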
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
deleted file mode 100644
index a11e8cb..0000000
--- a/vendor/google.golang.org/grpc/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-language: go
-
-matrix:
- include:
- - go: 1.13.x
- env: VET=1 GO111MODULE=on
- - go: 1.13.x
- env: RACE=1 GO111MODULE=on
- - go: 1.13.x
- env: RUN386=1
- - go: 1.13.x
- env: GRPC_GO_RETRY=on
- - go: 1.13.x
- env: TESTEXTRAS=1
- - go: 1.12.x
- env: GO111MODULE=on
- - go: 1.11.x
- env: GO111MODULE=on
- - go: 1.9.x
- env: GAE=1
-
-go_import_path: google.golang.org/grpc
-
-before_install:
- - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
- - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
- - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
- - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
-
-install:
- - try3() { eval "$*" || eval "$*" || eval "$*"; }
- - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
- - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
- - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
-
-script:
- - set -e
- - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi
- - if [[ -n "${VET}" ]]; then ./vet.sh; fi
- - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
- - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
- - make test
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 4f1567e..cd03f8c 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -57,6 +57,5 @@
- `make vet` to catch vet errors
- `make test` to run the tests
- `make testrace` to run tests in race mode
- - optional `make testappengine` to run tests with appengine
- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 093c82b..c6672c0 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -8,17 +8,18 @@
for general contribution guidelines.
## Maintainers (in alphabetical order)
-- [canguler](https://github.com/canguler), Google LLC
+
- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
- [menghanl](https://github.com/menghanl), Google LLC
- [srini100](https://github.com/srini100), Google LLC
## Emeritus Maintainers (in alphabetical order)
- [adelez](https://github.com/adelez), Google LLC
+- [canguler](https://github.com/canguler), Google LLC
- [iamqizhao](https://github.com/iamqizhao), Google LLC
+- [jadekler](https://github.com/jadekler), Google LLC
- [jtattermusch](https://github.com/jtattermusch), Google LLC
- [lyuxuan](https://github.com/lyuxuan), Google LLC
- [makmukhi](https://github.com/makmukhi), Google LLC
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index 410f7d5..1f89609 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -1,13 +1,13 @@
all: vet test testrace
-build: deps
+build:
go build google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
deps:
- go get -d -v google.golang.org/grpc/...
+ GO111MODULE=on go get -d -v google.golang.org/grpc/...
proto:
@ if ! which protoc > /dev/null; then \
@@ -16,29 +16,18 @@
fi
go generate google.golang.org/grpc/...
-test: testdeps
+test:
go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-testsubmodule: testdeps
+testsubmodule:
cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+ cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
-testappengine: testappenginedeps
- goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testappenginedeps:
- goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
-
-testdeps:
- go get -d -v -t google.golang.org/grpc/...
-
-testrace: testdeps
+testrace:
go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-updatedeps:
- go get -d -v -u -f google.golang.org/grpc/...
-
-updatetestdeps:
- go get -d -v -t -u -f google.golang.org/grpc/...
+testdeps:
+ GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
vet: vetdeps
./vet.sh
@@ -50,14 +39,8 @@
all \
build \
clean \
- deps \
proto \
test \
- testappengine \
- testappenginedeps \
- testdeps \
testrace \
- updatedeps \
- updatetestdeps \
vet \
vetdeps
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 0000000..5301977
--- /dev/null
+++ b/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 800e7bd..0e6ae69 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,64 +1,53 @@
# gRPC-Go
[](https://travis-ci.org/grpc/grpc-go)
-[](https://godoc.org/google.golang.org/grpc)
+[][API]
[](https://goreportcard.com/report/github.com/grpc/grpc-go)
-The Go implementation of [gRPC](https://grpc.io/): A high performance, open
-source, general RPC framework that puts mobile and HTTP/2 first. For more
-information see the [gRPC Quick Start:
-Go](https://grpc.io/docs/quickstart/go.html) guide.
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
-Installation
-------------
+## Prerequisites
-To install this package, you need to install Go and setup your Go workspace on
-your computer. The simplest way to install the library is to run:
+- **[Go][]**: any one of the **three latest major** [releases][go-releases].
+## Installation
+
+With [Go module][] support (Go 1.11+), simply add the following import
+
+```go
+import "google.golang.org/grpc"
```
+
+to your code, and then `go [build|run|test]` will automatically fetch the
+necessary dependencies.
+
+Otherwise, to install the `grpc-go` package, run the following command:
+
+```console
$ go get -u google.golang.org/grpc
```
-With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
-your source code and `go [build|run|test]` will automatically download the
-necessary dependencies ([Go modules
-ref](https://github.com/golang/go/wiki/Modules)).
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
-If you are trying to access grpc-go from within China, please see the
-[FAQ](#FAQ) below.
+## Learn more
-Prerequisites
--------------
-gRPC-Go requires Go 1.9 or later.
+- [Go gRPC docs][], which include a [quick start][] and [API
+ reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
-Documentation
--------------
-- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
- descriptions.
-- Documentation on specific topics can be found in the [Documentation
- directory](Documentation/).
-- Examples can be found in the [examples directory](examples/).
+## FAQ
-Performance
------------
-Performance benchmark data for grpc-go and other languages is maintained in
-[this
-dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+### I/O Timeout Errors
-Status
-------
-General Availability [Google Cloud Platform Launch
-Stages](https://cloud.google.com/terms/launch-stages).
-
-FAQ
----
-
-#### I/O Timeout Errors
-
-The `golang.org` domain may be blocked from some countries. `go get` usually
+The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens:
-```
+```console
$ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
```
@@ -69,7 +58,7 @@
- Without Go module support: `git clone` the repo manually:
- ```
+ ```sh
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
```
@@ -79,7 +68,7 @@
- With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory:
- ```
+ ```sh
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
go mod tidy
go mod vendor
@@ -87,19 +76,17 @@
```
Again, this will need to be done for all transitive dependencies hosted on
- golang.org as well. Please refer to [this
- issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
- this concern.
+ golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
-#### Compiling error, undefined: grpc.SupportPackageIsVersion
+### Compiling error, undefined: grpc.SupportPackageIsVersion
-##### If you are using Go modules:
+#### If you are using Go modules:
-Please ensure your gRPC-Go version is `require`d at the appropriate version in
+Ensure your gRPC-Go version is `require`d at the appropriate version in
the same module containing the generated `.pb.go` files. For example,
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
-```
+```go
module <your module name>
require (
@@ -107,23 +94,27 @@
)
```
-##### If you are *not* using Go modules:
+#### If you are *not* using Go modules:
-Please update proto package, gRPC package and rebuild the proto files:
- - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- - `go get -u google.golang.org/grpc`
- - `protoc --go_out=plugins=grpc:. *.proto`
+Update the `proto` package, gRPC package, and rebuild the `.proto` files:
-#### How to turn on logging
-
-The default logger is controlled by the environment variables. Turn everything
-on by setting:
-
-```
-GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
+```sh
+go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
+go get -u google.golang.org/grpc
+protoc --go_out=plugins=grpc:. *.proto
```
-#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+### How to turn on logging
+
+The default logger is controlled by environment variables. Turn everything on
+like this:
+
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
This error means the connection the RPC is using was closed, and there are many
possible reasons, including:
@@ -139,3 +130,12 @@
the root cause of the connection being closed is on the server side. Turn on
logging on __both client and server__, and see if there are any transport
errors.
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
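The rewritten README leans on Go modules for installation: import the package and let `go build` fetch it. A minimal dial sketch under that workflow; the address, timeout, and plaintext transport are placeholder choices for local experimentation, not recommendations from this change:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithBlock makes DialContext wait until the connection is up or ctx expires.
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(), // plaintext; only for a local example
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	log.Println("connected to", conn.Target())
}
```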
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 0000000..be6e108
--- /dev/null
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
index 68ffc62..ae13dda 100644
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -19,52 +19,83 @@
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
-// All APIs in this package are EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
package attributes
-import "fmt"
-
// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
-// types for keys.
+// types for keys. Values should not be modified after they are added to an
+// Attributes or if they were received from one. If values implement 'Equal(o
+// interface{}) bool', it will be called by (*Attributes).Equal to determine
+// whether two values with the same key should be considered equal.
type Attributes struct {
m map[interface{}]interface{}
}
-// New returns a new Attributes containing all key/value pairs in kvs. If the
-// same key appears multiple times, the last value overwrites all previous
-// values for that key. Panics if len(kvs) is not even.
-func New(kvs ...interface{}) *Attributes {
- if len(kvs)%2 != 0 {
- panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
- }
- a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
- for i := 0; i < len(kvs)/2; i++ {
- a.m[kvs[i*2]] = kvs[i*2+1]
- }
- return a
+// New returns a new Attributes containing the key/value pair.
+func New(key, value interface{}) *Attributes {
+ return &Attributes{m: map[interface{}]interface{}{key: value}}
}
-// WithValues returns a new Attributes containing all key/value pairs in a and
-// kvs. Panics if len(kvs) is not even. If the same key appears multiple
-// times, the last value overwrites all previous values for that key. To
-// remove an existing key, use a nil value.
-func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
- if len(kvs)%2 != 0 {
- panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair. If the same key appears multiple times, the
+// last value overwrites all previous values for that key. To remove an
+// existing key, use a nil value. value should not be modified later.
+func (a *Attributes) WithValue(key, value interface{}) *Attributes {
+ if a == nil {
+ return New(key, value)
}
- n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
+ n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
for k, v := range a.m {
n.m[k] = v
}
- for i := 0; i < len(kvs)/2; i++ {
- n.m[kvs[i*2]] = kvs[i*2+1]
- }
+ n.m[key] = value
return n
}
// Value returns the value associated with these attributes for key, or nil if
-// no value is associated with key.
+// no value is associated with key. The returned value should not be modified.
func (a *Attributes) Value(key interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
return a.m[key]
}
+
+// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
+// bool' is implemented for a value in the attributes, it is called to
+// determine if the value matches the one stored in the other attributes. If
+// Equal is not implemented, standard equality is used to determine if the two
+// values are equal. Note that some types (e.g. maps) aren't comparable by
+// default, so they must be wrapped in a struct, or in an alias type, with Equal
+// defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+ if a == nil && o == nil {
+ return true
+ }
+ if a == nil || o == nil {
+ return false
+ }
+ if len(a.m) != len(o.m) {
+ return false
+ }
+ for k, v := range a.m {
+ ov, ok := o.m[k]
+ if !ok {
+ // o missing element of a
+ return false
+ }
+ if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
+ if !eq.Equal(ov) {
+ return false
+ }
+ } else if v != ov {
+ // Fall back to a standard equality check if Equal is unimplemented.
+ return false
+ }
+ }
+ return true
+}
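The attributes change above replaces the variadic `New(kvs ...)`/`WithValues(kvs ...)` pair with single-pair `New(key, value)`/`WithValue(key, value)` and adds `Equal`. A short sketch of the new surface; the key type and values are illustrative:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// key is an unexported type so this attribute cannot collide with keys
// defined by other packages.
type key struct{}

func main() {
	// Each call now takes exactly one key/value pair.
	a := attributes.New(key{}, "cluster-a")
	b := a.WithValue(key{}, "cluster-b") // a is unchanged; b holds the new value

	fmt.Println(a.Value(key{})) // cluster-a
	fmt.Println(b.Value(key{})) // cluster-b
	fmt.Println(a.Equal(b))     // false
}
```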
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index ff7c3ee..542594f 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -48,7 +48,10 @@
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ConnectParams struct {
// Backoff specifies the configuration options for connection backoff.
Backoff backoff.Config
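`ConnectParams` remains the experimental knob for connection backoff. A hedged sketch of passing it at dial time; the numbers and target are arbitrary examples, not tuned defaults:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	cp := grpc.ConnectParams{
		Backoff: backoff.Config{
			BaseDelay:  500 * time.Millisecond,
			Multiplier: 1.6,
			Jitter:     0.2,
			MaxDelay:   30 * time.Second,
		},
		MinConnectTimeout: 10 * time.Second,
	}

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(cp),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```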
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
deleted file mode 100644
index a8eb0f4..0000000
--- a/vendor/google.golang.org/grpc/balancer.go
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "net"
- "sync"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/naming"
- "google.golang.org/grpc/status"
-)
-
-// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
-type Address struct {
- // Addr is the server address on which a connection will be established.
- Addr string
- // Metadata is the information associated with Addr, which may be used
- // to make load balancing decision.
- Metadata interface{}
-}
-
-// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type BalancerConfig struct {
- // DialCreds is the transport credential the Balancer implementation can
- // use to dial to a remote load balancer server. The Balancer implementations
- // can ignore this if it does not need to talk to another party securely.
- DialCreds credentials.TransportCredentials
- // Dialer is the custom dialer the Balancer implementation can use to dial
- // to a remote load balancer server. The Balancer implementations
- // can ignore this if it doesn't need to talk to remote balancer.
- Dialer func(context.Context, string) (net.Conn, error)
-}
-
-// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type BalancerGetOptions struct {
- // BlockingWait specifies whether Get should block when there is no
- // connected address.
- BlockingWait bool
-}
-
-// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type Balancer interface {
- // Start does the initialization work to bootstrap a Balancer. For example,
- // this function may start the name resolution and watch the updates. It will
- // be called when dialing.
- Start(target string, config BalancerConfig) error
- // Up informs the Balancer that gRPC has a connection to the server at
- // addr. It returns down which is called once the connection to addr gets
- // lost or closed.
- // TODO: It is not clear how to construct and take advantage of the meaningful error
- // parameter for down. Need realistic demands to guide.
- Up(addr Address) (down func(error))
- // Get gets the address of a server for the RPC corresponding to ctx.
- // i) If it returns a connected address, gRPC internals issues the RPC on the
- // connection to this address;
- // ii) If it returns an address on which the connection is under construction
- // (initiated by Notify(...)) but not connected, gRPC internals
- // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
- // Shutdown state;
- // or
- // * issues RPC on the connection otherwise.
- // iii) If it returns an address on which the connection does not exist, gRPC
- // internals treats it as an error and will fail the corresponding RPC.
- //
- // Therefore, the following is the recommended rule when writing a custom Balancer.
- // If opts.BlockingWait is true, it should return a connected address or
- // block if there is no connected address. It should respect the timeout or
- // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
- // RPCs), it should return an address it has notified via Notify(...) immediately
- // instead of blocking.
- //
- // The function returns put which is called once the rpc has completed or failed.
- // put can collect and report RPC stats to a remote load balancer.
- //
- // This function should only return the errors Balancer cannot recover by itself.
- // gRPC internals will fail the RPC if an error is returned.
- Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
- // Notify returns a channel that is used by gRPC internals to watch the addresses
- // gRPC needs to connect. The addresses might be from a name resolver or remote
- // load balancer. gRPC internals will compare it with the existing connected
- // addresses. If the address Balancer notified is not in the existing connected
- // addresses, gRPC starts to connect the address. If an address in the existing
- // connected addresses is not in the notification list, the corresponding connection
- // is shutdown gracefully. Otherwise, there are no operations to take. Note that
- // the Address slice must be the full list of the Addresses which should be connected.
- // It is NOT delta.
- Notify() <-chan []Address
- // Close shuts down the balancer.
- Close() error
-}
-
-// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
-// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
-func RoundRobin(r naming.Resolver) Balancer {
- return &roundRobin{r: r}
-}
-
-type addrInfo struct {
- addr Address
- connected bool
-}
-
-type roundRobin struct {
- r naming.Resolver
- w naming.Watcher
- addrs []*addrInfo // all the addresses the client should potentially connect
- mu sync.Mutex
- addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
- next int // index of the next address to return for Get()
- waitCh chan struct{} // the channel to block when there is no connected address available
- done bool // The Balancer is closed.
-}
-
-func (rr *roundRobin) watchAddrUpdates() error {
- updates, err := rr.w.Next()
- if err != nil {
- grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
- return err
- }
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for _, update := range updates {
- addr := Address{
- Addr: update.Addr,
- Metadata: update.Metadata,
- }
- switch update.Op {
- case naming.Add:
- var exist bool
- for _, v := range rr.addrs {
- if addr == v.addr {
- exist = true
- grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
- break
- }
- }
- if exist {
- continue
- }
- rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
- case naming.Delete:
- for i, v := range rr.addrs {
- if addr == v.addr {
- copy(rr.addrs[i:], rr.addrs[i+1:])
- rr.addrs = rr.addrs[:len(rr.addrs)-1]
- break
- }
- }
- default:
- grpclog.Errorln("Unknown update.Op ", update.Op)
- }
- }
- // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
- open := make([]Address, len(rr.addrs))
- for i, v := range rr.addrs {
- open[i] = v.addr
- }
- if rr.done {
- return ErrClientConnClosing
- }
- select {
- case <-rr.addrCh:
- default:
- }
- rr.addrCh <- open
- return nil
-}
-
-func (rr *roundRobin) Start(target string, config BalancerConfig) error {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- if rr.done {
- return ErrClientConnClosing
- }
- if rr.r == nil {
- // If there is no name resolver installed, it is not needed to
- // do name resolution. In this case, target is added into rr.addrs
- // as the only address available and rr.addrCh stays nil.
- rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
- return nil
- }
- w, err := rr.r.Resolve(target)
- if err != nil {
- return err
- }
- rr.w = w
- rr.addrCh = make(chan []Address, 1)
- go func() {
- for {
- if err := rr.watchAddrUpdates(); err != nil {
- return
- }
- }
- }()
- return nil
-}
-
-// Up sets the connected state of addr and sends notification if there are pending
-// Get() calls.
-func (rr *roundRobin) Up(addr Address) func(error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- var cnt int
- for _, a := range rr.addrs {
- if a.addr == addr {
- if a.connected {
- return nil
- }
- a.connected = true
- }
- if a.connected {
- cnt++
- }
- }
- // addr is only one which is connected. Notify the Get() callers who are blocking.
- if cnt == 1 && rr.waitCh != nil {
- close(rr.waitCh)
- rr.waitCh = nil
- }
- return func(err error) {
- rr.down(addr, err)
- }
-}
-
-// down unsets the connected state of addr.
-func (rr *roundRobin) down(addr Address, err error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for _, a := range rr.addrs {
- if addr == a.addr {
- a.connected = false
- break
- }
- }
-}
-
-// Get returns the next addr in the rotation.
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
- var ch chan struct{}
- rr.mu.Lock()
- if rr.done {
- rr.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
-
- if len(rr.addrs) > 0 {
- if rr.next >= len(rr.addrs) {
- rr.next = 0
- }
- next := rr.next
- for {
- a := rr.addrs[next]
- next = (next + 1) % len(rr.addrs)
- if a.connected {
- addr = a.addr
- rr.next = next
- rr.mu.Unlock()
- return
- }
- if next == rr.next {
- // Has iterated all the possible address but none is connected.
- break
- }
- }
- }
- if !opts.BlockingWait {
- if len(rr.addrs) == 0 {
- rr.mu.Unlock()
- err = status.Errorf(codes.Unavailable, "there is no address available")
- return
- }
- // Returns the next addr on rr.addrs for failfast RPCs.
- addr = rr.addrs[rr.next].addr
- rr.next++
- rr.mu.Unlock()
- return
- }
- // Wait on rr.waitCh for non-failfast RPCs.
- if rr.waitCh == nil {
- ch = make(chan struct{})
- rr.waitCh = ch
- } else {
- ch = rr.waitCh
- }
- rr.mu.Unlock()
- for {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- return
- case <-ch:
- rr.mu.Lock()
- if rr.done {
- rr.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
-
- if len(rr.addrs) > 0 {
- if rr.next >= len(rr.addrs) {
- rr.next = 0
- }
- next := rr.next
- for {
- a := rr.addrs[next]
- next = (next + 1) % len(rr.addrs)
- if a.connected {
- addr = a.addr
- rr.next = next
- rr.mu.Unlock()
- return
- }
- if next == rr.next {
- // Has iterated all the possible address but none is connected.
- break
- }
- }
- }
- // The newly added addr got removed by Down() again.
- if rr.waitCh == nil {
- ch = make(chan struct{})
- rr.waitCh = ch
- } else {
- ch = rr.waitCh
- }
- rr.mu.Unlock()
- }
- }
-}
-
-func (rr *roundRobin) Notify() <-chan []Address {
- return rr.addrCh
-}
-
-func (rr *roundRobin) Close() error {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- if rr.done {
- return errBalancerClosed
- }
- rr.done = true
- if rr.w != nil {
- rr.w.Close()
- }
- if rr.waitCh != nil {
- close(rr.waitCh)
- rr.waitCh = nil
- }
- if rr.addrCh != nil {
- close(rr.addrCh)
- }
- return nil
-}
-
-// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
-// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
-// returns the only address Up by resetTransport().
-type pickFirst struct {
- *roundRobin
-}
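With balancer.go deleted, the deprecated `grpc.RoundRobin`/`naming.Resolver` path is gone from the vendored tree. A rough migration sketch that selects the registered `round_robin` policy through a default service config; the target and dial options are illustrative:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // ensure the round_robin policy is registered
)

func main() {
	conn, err := grpc.Dial(
		"dns:///example.internal:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```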
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 9258858..bcc6f54 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -75,24 +75,26 @@
return nil
}
-// SubConn represents a gRPC sub connection.
-// Each sub connection contains a list of addresses. gRPC will
-// try to connect to them (in sequence), and stop trying the
-// remainder once one connection is successful.
+// A SubConn represents a single connection to a gRPC backend service.
//
-// The reconnect backoff will be applied on the list, not a single address.
-// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+// Each SubConn contains a list of addresses.
//
-// All SubConns start in IDLE, and will not try to connect. To trigger
-// the connecting, Balancers must call Connect.
-// When the connection encounters an error, it will reconnect immediately.
-// When the connection becomes IDLE, it will not reconnect unless Connect is
-// called.
+// All SubConns start in IDLE, and will not try to connect. To trigger the
+// connecting, Balancers must call Connect. If a connection re-enters IDLE,
+// Balancers must call Connect again to trigger a new connection attempt.
//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
+// gRPC will try to connect to the addresses in sequence, and stop trying the
+// remainder once the first connection is successful. If an attempt to connect
+// to all addresses encounters an error, the SubConn will enter
+// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
+//
+// Once established, if a connection is lost, the SubConn will transition
+// directly to IDLE.
+//
+// This interface is to be implemented by gRPC. Users should not need their own
+// implementation of this interface. For situations like testing, any
+// implementations should embed this interface. This allows gRPC to add new
+// methods to this interface.
type SubConn interface {
// UpdateAddresses updates the addresses used in this SubConn.
// gRPC checks if currently-connected address is still in the new list.
@@ -101,6 +103,9 @@
// a new connection will be created.
//
// This will trigger a state transition for the SubConn.
+ //
+ // Deprecated: This method is now part of the ClientConn interface and will
+ // eventually be removed from here.
UpdateAddresses([]resolver.Address)
// Connect starts the connecting for this SubConn.
Connect()
@@ -111,6 +116,9 @@
// CredsBundle is the credentials bundle that will be used in the created
// SubConn. If it's nil, the original creds from grpc DialOptions will be
// used.
+ //
+ // Deprecated: Use the Attributes field in resolver.Address to pass
+ // arbitrary data to the credential handshaker.
CredsBundle credentials.Bundle
// HealthCheckEnabled indicates whether health check service should be
// enabled on this SubConn
@@ -123,7 +131,7 @@
// determine the state of the ClientConn.
ConnectivityState connectivity.State
// Picker is used to choose connections (SubConns) for RPCs.
- Picker V2Picker
+ Picker Picker
}
// ClientConn represents a gRPC ClientConn.
@@ -140,21 +148,19 @@
// RemoveSubConn removes the SubConn from ClientConn.
// The SubConn will be shutdown.
RemoveSubConn(SubConn)
-
- // UpdateBalancerState is called by balancer to notify gRPC that some internal
- // state in balancer has changed.
+ // UpdateAddresses updates the addresses used in the passed in SubConn.
+ // gRPC checks if the currently connected address is still in the new list.
+ // If so, the connection will be kept. Else, the connection will be
+ // gracefully closed, and a new connection will be created.
//
- // gRPC will update the connectivity state of the ClientConn, and will call pick
- // on the new picker to pick new SubConn.
- //
- // Deprecated: use UpdateState instead
- UpdateBalancerState(s connectivity.State, p Picker)
+ // This will trigger a state transition for the SubConn.
+ UpdateAddresses(SubConn, []resolver.Address)
// UpdateState notifies gRPC that the balancer's internal state has
// changed.
//
- // gRPC will update the connectivity state of the ClientConn, and will call pick
- // on the new picker to pick new SubConns.
+ // gRPC will update the connectivity state of the ClientConn, and will call
+ // Pick on the new Picker to pick new SubConns.
UpdateState(State)
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
@@ -168,21 +174,32 @@
// BuildOptions contains additional information for Build.
type BuildOptions struct {
- // DialCreds is the transport credential the Balancer implementation can
- // use to dial to a remote load balancer server. The Balancer implementations
- // can ignore this if it does not need to talk to another party securely.
+ // DialCreds is the transport credentials to use when communicating with a
+ // remote load balancer server. Balancer implementations which do not
+ // communicate with a remote load balancer server can ignore this field.
DialCreds credentials.TransportCredentials
- // CredsBundle is the credentials bundle that the Balancer can use.
+ // CredsBundle is the credentials bundle to use when communicating with a
+ // remote load balancer server. Balancer implementations which do not
+ // communicate with a remote load balancer server can ignore this field.
CredsBundle credentials.Bundle
- // Dialer is the custom dialer the Balancer implementation can use to dial
- // to a remote load balancer server. The Balancer implementations
- // can ignore this if it doesn't need to talk to remote balancer.
+ // Dialer is the custom dialer to use when communicating with a remote load
+ // balancer server. Balancer implementations which do not communicate with a
+ // remote load balancer server can ignore this field.
Dialer func(context.Context, string) (net.Conn, error)
- // ChannelzParentID is the entity parent's channelz unique identification number.
+ // Authority is the server name to use as part of the authentication
+ // handshake when communicating with a remote load balancer server. Balancer
+ // implementations which do not communicate with a remote load balancer
+ // server can ignore this field.
+ Authority string
+ // ChannelzParentID is the parent ClientConn's channelz ID.
ChannelzParentID int64
- // Target contains the parsed address info of the dial target. It is the same resolver.Target as
- // passed to the resolver.
- // See the documentation for the resolver.Target type for details about what it contains.
+ // CustomUserAgent is the custom user agent set on the parent ClientConn.
+ // The balancer should set the same custom user agent if it creates a
+ // ClientConn.
+ CustomUserAgent string
+ // Target contains the parsed address info of the dial target. It is the
+ // same resolver.Target as passed to the resolver. See the documentation for
+ // the resolver.Target type for details about what it contains.
Target resolver.Target
}
@@ -232,56 +249,17 @@
var (
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
- // gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+ // gRPC will block the RPC until a new picker is available via UpdateState().
ErrNoSubConnAvailable = errors.New("no SubConn is available")
// ErrTransientFailure indicates all SubConns are in TransientFailure.
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
- ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
+ //
+ // Deprecated: return an appropriate error based on the last resolution or
+ // connection attempt instead. The behavior is the same for any non-gRPC
+ // status error.
+ ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
)
-// Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot every time its
-// internal state has changed.
-//
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-//
-// Deprecated: use V2Picker instead
-type Picker interface {
- // Pick returns the SubConn to be used to send the RPC.
- // The returned SubConn must be one returned by NewSubConn().
- //
- // This functions is expected to return:
- // - a SubConn that is known to be READY;
- // - ErrNoSubConnAvailable if no SubConn is available, but progress is being
- // made (for example, some SubConn is in CONNECTING mode);
- // - other errors if no active connecting is happening (for example, all SubConn
- // are in TRANSIENT_FAILURE mode).
- //
- // If a SubConn is returned:
- // - If it is READY, gRPC will send the RPC on it;
- // - If it is not ready, or becomes not ready after it's returned, gRPC will
- // block until UpdateBalancerState() is called and will call pick on the
- // new picker. The done function returned from Pick(), if not nil, will be
- // called with nil error, no bytes sent and no bytes received.
- //
- // If the returned error is not nil:
- // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
- // - If the error is ErrTransientFailure or implements IsTransientFailure()
- // bool, returning true:
- // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
- // is called to pick again;
- // - Otherwise, RPC will fail with unavailable error.
- // - Else (error is other non-nil error):
- // - The RPC will fail with the error's status code, or Unknown if it is
- // not a status error.
- //
- // The returned done() function will be called once the rpc has finished,
- // with the final status of that RPC. If the SubConn returned is not a
- // valid SubConn type, done may not be called. done may be nil if balancer
- // doesn't care about the RPC status.
- Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
-}
-
// PickResult contains information related to a connection chosen for an RPC.
type PickResult struct {
// SubConn is the connection to use for this pick, if its state is Ready.
@@ -297,24 +275,19 @@
Done func(DoneInfo)
}
-type transientFailureError struct {
- error
-}
+// TransientFailureError returns e. It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }
-func (e *transientFailureError) IsTransientFailure() bool { return true }
-
-// TransientFailureError wraps err in an error implementing
-// IsTransientFailure() bool, returning true.
-func TransientFailureError(err error) error {
- return &transientFailureError{error: err}
-}
-
-// V2Picker is used by gRPC to pick a SubConn to send an RPC.
+// Picker is used by gRPC to pick a SubConn to send an RPC.
// Balancer is expected to generate a new picker from its snapshot every time its
// internal state has changed.
//
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-type V2Picker interface {
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
+type Picker interface {
// Pick returns the connection to use for this RPC and related information.
//
// Pick should not block. If the balancer needs to do I/O or any blocking
@@ -327,14 +300,13 @@
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
// Picker is provided by the balancer (using ClientConn.UpdateState).
//
- // - If the error implements IsTransientFailure() bool, returning true,
- // wait for ready RPCs will wait, but non-wait for ready RPCs will be
- // terminated with this error's Error() string and status code
- // Unavailable.
+ // - If the error is a status error (implemented by the grpc/status
+ // package), gRPC will terminate the RPC with the code and message
+ // provided.
//
- // - Any other errors terminate all RPCs with the code and message
- // provided. If the error is not a status error, it will be converted by
- // gRPC to a status error with code Unknown.
+ // - For all other errors, wait for ready RPCs will wait, but non-wait for
+ // ready RPCs will be terminated with this error's Error() string and
+ // status code Unavailable.
Pick(info PickInfo) (PickResult, error)
}
@@ -343,34 +315,40 @@
//
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
//
-// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
-// to be called synchronously from the same goroutine.
-// There's no guarantee on picker.Pick, it may be called anytime.
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine. There's no
+// guarantee on picker.Pick, it may be called anytime.
type Balancer interface {
- // HandleSubConnStateChange is called by gRPC when the connectivity state
- // of sc has changed.
- // Balancer is expected to aggregate all the state of SubConn and report
- // that back to gRPC.
- // Balancer should also generate and update Pickers when its internal state has
- // been changed by the new state.
- //
- // Deprecated: if V2Balancer is implemented by the Balancer,
- // UpdateSubConnState will be called instead.
- HandleSubConnStateChange(sc SubConn, state connectivity.State)
- // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
- // balancers.
- // Balancer can create new SubConn or remove SubConn with the addresses.
- // An empty address slice and a non-nil error will be passed if the resolver returns
- // non-nil error to gRPC.
- //
- // Deprecated: if V2Balancer is implemented by the Balancer,
- // UpdateClientConnState will be called instead.
- HandleResolvedAddrs([]resolver.Address, error)
+ // UpdateClientConnState is called by gRPC when the state of the ClientConn
+ // changes. If the error returned is ErrBadResolverState, the ClientConn
+ // will begin calling ResolveNow on the active name resolver with
+ // exponential backoff until a subsequent call to UpdateClientConnState
+ // returns a nil error. Any other errors are currently ignored.
+ UpdateClientConnState(ClientConnState) error
+ // ResolverError is called by gRPC when the name resolver reports an error.
+ ResolverError(error)
+ // UpdateSubConnState is called by gRPC when the state of a SubConn
+ // changes.
+ UpdateSubConnState(SubConn, SubConnState)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
+// ExitIdler is an optional interface for balancers to implement. If
+// implemented, ExitIdle will be called when ClientConn.Connect is called, if
+// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause
+// all SubConns to connect.
+//
+// Notice: it will be required for all balancers to implement this in a future
+// release.
+type ExitIdler interface {
+ // ExitIdle instructs the LB policy to reconnect to backends / exit the
+ // IDLE state, if appropriate and possible. Note that SubConns that enter
+ // the IDLE state will not reconnect until SubConn.Connect is called.
+ ExitIdle()
+}
+
// SubConnState describes the state of a SubConn.
type SubConnState struct {
// ConnectivityState is the connectivity state of the SubConn.
@@ -393,34 +371,15 @@
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
-// V2Balancer is defined for documentation purposes. If a Balancer also
-// implements V2Balancer, its UpdateClientConnState method will be called
-// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
-// instead of HandleSubConnStateChange.
-type V2Balancer interface {
- // UpdateClientConnState is called by gRPC when the state of the ClientConn
- // changes. If the error returned is ErrBadResolverState, the ClientConn
- // will begin calling ResolveNow on the active name resolver with
- // exponential backoff until a subsequent call to UpdateClientConnState
- // returns a nil error. Any other errors are currently ignored.
- UpdateClientConnState(ClientConnState) error
- // ResolverError is called by gRPC when the name resolver reports an error.
- ResolverError(error)
- // UpdateSubConnState is called by gRPC when the state of a SubConn
- // changes.
- UpdateSubConnState(SubConn, SubConnState)
- // Close closes the balancer. The balancer is not required to call
- // ClientConn.RemoveSubConn for its existing SubConns.
- Close()
-}
-
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state.
//
// It's not thread safe.
type ConnectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transient failure state.
+ numIdle uint64 // Number of addrConns in idle state.
}
// RecordTransition records state change happening in subConn and based on that
@@ -428,9 +387,11 @@
//
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else the aggregated state is TransientFailure.
+// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure;
+// - Else if at least one SubConn is Idle, the aggregated state is Idle;
+// - Else there are no subconns and the aggregated state is Transient Failure
//
-// Idle and Shutdown are not considered.
+// Shutdown is not considered.
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
// Update counters.
for idx, state := range []connectivity.State{oldState, newState} {
@@ -440,6 +401,10 @@
cse.numReady += updateVal
case connectivity.Connecting:
cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ case connectivity.Idle:
+ cse.numIdle += updateVal
}
}
@@ -450,5 +415,11 @@
if cse.numConnecting > 0 {
return connectivity.Connecting
}
+ if cse.numTransientFailure > 0 {
+ return connectivity.TransientFailure
+ }
+ if cse.numIdle > 0 {
+ return connectivity.Idle
+ }
return connectivity.TransientFailure
}
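
To make the reworked balancer.Picker contract above concrete, here is a minimal sketch of a picker built over a snapshot of READY SubConns. The fixedPicker type and its fields are illustrative only and are not part of the vendored package; the error handling follows the Pick documentation above (ErrNoSubConnAvailable blocks RPCs until a new picker is installed).

package example

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

// fixedPicker round-robins over the SubConns that were READY when the
// balancer generated this picker. The balancer is expected to install a new
// picker whenever its internal state changes.
type fixedPicker struct {
	subConns []balancer.SubConn
	next     uint32 // round-robin cursor, advanced atomically because Pick may run concurrently.
}

func (p *fixedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.subConns) == 0 {
		// gRPC blocks the RPC until the balancer provides a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	n := atomic.AddUint32(&p.next, 1)
	sc := p.subConns[int(n-1)%len(p.subConns)]
	// Done may be left nil when the picker does not need RPC completion info.
	return balancer.PickResult{SubConn: sc}, nil
}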
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 80559b8..a67074a 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -19,7 +19,6 @@
package base
import (
- "context"
"errors"
"fmt"
@@ -29,20 +28,20 @@
"google.golang.org/grpc/resolver"
)
+var logger = grpclog.Component("balancer")
+
type baseBuilder struct {
- name string
- pickerBuilder PickerBuilder
- v2PickerBuilder V2PickerBuilder
- config Config
+ name string
+ pickerBuilder PickerBuilder
+ config Config
}
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
- cc: cc,
- pickerBuilder: bb.pickerBuilder,
- v2PickerBuilder: bb.v2PickerBuilder,
+ cc: cc,
+ pickerBuilder: bb.pickerBuilder,
- subConns: make(map[resolver.Address]balancer.SubConn),
+ subConns: resolver.NewAddressMap(),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
config: bb.config,
@@ -50,11 +49,7 @@
// Initialize picker to a picker that always returns
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
// may call UpdateState with this picker.
- if bb.pickerBuilder != nil {
- bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
- } else {
- bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
- }
+ bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
return bal
}
@@ -62,82 +57,73 @@
return bb.name
}
-var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
-
type baseBalancer struct {
- cc balancer.ClientConn
- pickerBuilder PickerBuilder
- v2PickerBuilder V2PickerBuilder
+ cc balancer.ClientConn
+ pickerBuilder PickerBuilder
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
- subConns map[resolver.Address]balancer.SubConn
+ subConns *resolver.AddressMap
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
- v2Picker balancer.V2Picker
config Config
resolverErr error // the last error reported by the resolver; cleared on successful resolution
connErr error // the last connection error; cleared upon leaving TransientFailure
}
-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- panic("not implemented")
-}
-
func (b *baseBalancer) ResolverError(err error) {
b.resolverErr = err
- if len(b.subConns) == 0 {
+ if b.subConns.Len() == 0 {
b.state = connectivity.TransientFailure
}
+
if b.state != connectivity.TransientFailure {
// The picker will not change since the balancer does not currently
// report an error.
return
}
b.regeneratePicker()
- if b.picker != nil {
- b.cc.UpdateBalancerState(b.state, b.picker)
- } else {
- b.cc.UpdateState(balancer.State{
- ConnectivityState: b.state,
- Picker: b.v2Picker,
- })
- }
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: b.state,
+ Picker: b.picker,
+ })
}
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
- // TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
- if grpclog.V(2) {
- grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+ if logger.V(2) {
+ logger.Info("base.baseBalancer: got new ClientConn state: ", s)
}
// Successful resolution; clear resolver error and ensure we return nil.
b.resolverErr = nil
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
- addrsSet := make(map[resolver.Address]struct{})
+ addrsSet := resolver.NewAddressMap()
for _, a := range s.ResolverState.Addresses {
- addrsSet[a] = struct{}{}
- if _, ok := b.subConns[a]; !ok {
+ addrsSet.Set(a, nil)
+ if _, ok := b.subConns.Get(a); !ok {
// a is a new address (not existing in b.subConns).
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
if err != nil {
- grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+ logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue
}
- b.subConns[a] = sc
+ b.subConns.Set(a, sc)
b.scStates[sc] = connectivity.Idle
+ b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
sc.Connect()
}
}
- for a, sc := range b.subConns {
+ for _, a := range b.subConns.Keys() {
+ sci, _ := b.subConns.Get(a)
+ sc := sci.(balancer.SubConn)
// a was removed by resolver.
- if _, ok := addrsSet[a]; !ok {
+ if _, ok := addrsSet.Get(a); !ok {
b.cc.RemoveSubConn(sc)
- delete(b.subConns, a)
+ b.subConns.Delete(a)
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
- // The entry will be deleted in HandleSubConnStateChange.
+ // The entry will be deleted in UpdateSubConnState.
}
}
// If resolver state contains no addresses, return an error so ClientConn
@@ -171,56 +157,42 @@
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
- if b.pickerBuilder != nil {
- b.picker = NewErrPicker(balancer.ErrTransientFailure)
- } else {
- b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
- }
+ b.picker = NewErrPicker(b.mergeErrors())
return
}
- if b.pickerBuilder != nil {
- readySCs := make(map[resolver.Address]balancer.SubConn)
+ readySCs := make(map[balancer.SubConn]SubConnInfo)
- // Filter out all ready SCs from full subConn map.
- for addr, sc := range b.subConns {
- if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
- readySCs[addr] = sc
- }
+ // Filter out all ready SCs from full subConn map.
+ for _, addr := range b.subConns.Keys() {
+ sci, _ := b.subConns.Get(addr)
+ sc := sci.(balancer.SubConn)
+ if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs[sc] = SubConnInfo{Address: addr}
}
- b.picker = b.pickerBuilder.Build(readySCs)
- } else {
- readySCs := make(map[balancer.SubConn]SubConnInfo)
-
- // Filter out all ready SCs from full subConn map.
- for addr, sc := range b.subConns {
- if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
- readySCs[sc] = SubConnInfo{Address: addr}
- }
- }
- b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
}
-}
-
-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- panic("not implemented")
+ b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
}
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState
- if grpclog.V(2) {
- grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+ if logger.V(2) {
+ logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
}
oldS, ok := b.scStates[sc]
if !ok {
- if grpclog.V(2) {
- grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ if logger.V(2) {
+ logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
}
return
}
- if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
- // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
+ if oldS == connectivity.TransientFailure &&
+ (s == connectivity.Connecting || s == connectivity.Idle) {
+ // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
// CONNECTING transitions to prevent the aggregated state from being
// always CONNECTING when many backends exist but are all down.
+ if s == connectivity.Idle {
+ sc.Connect()
+ }
return
}
b.scStates[sc] = s
@@ -246,12 +218,7 @@
b.state == connectivity.TransientFailure {
b.regeneratePicker()
}
-
- if b.picker != nil {
- b.cc.UpdateBalancerState(b.state, b.picker)
- } else {
- b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
- }
+ b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
}
// Close is a nop because base balancer doesn't have internal state to clean up,
@@ -259,28 +226,25 @@
func (b *baseBalancer) Close() {
}
-// NewErrPicker returns a picker that always returns err on Pick().
+// ExitIdle is a nop because the base balancer attempts to stay connected to
+// all SubConns at all times.
+func (b *baseBalancer) ExitIdle() {
+}
+
+// NewErrPicker returns a Picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker {
return &errPicker{err: err}
}
+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
- return nil, nil, p.err
-}
-
-// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
-func NewErrPickerV2(err error) balancer.V2Picker {
- return &errPickerV2{err: err}
-}
-
-type errPickerV2 struct {
- err error // Pick() always returns this err.
-}
-
-func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 4192918..e31d76e 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -37,15 +37,8 @@
// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
- // Build takes a slice of ready SubConns, and returns a picker that will be
- // used by gRPC to pick a SubConn.
- Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
-}
-
-// V2PickerBuilder creates balancer.V2Picker.
-type V2PickerBuilder interface {
// Build returns a picker that will be used by gRPC to pick a SubConn.
- Build(info PickerBuildInfo) balancer.V2Picker
+ Build(info PickerBuildInfo) balancer.Picker
}
// PickerBuildInfo contains information needed by the picker builder to
@@ -62,32 +55,17 @@
Address resolver.Address // the address used to create this SubConn
}
-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
- return NewBalancerBuilderWithConfig(name, pb, Config{})
-}
-
// Config contains the config info about the base balancer builder.
type Config struct {
// HealthCheck indicates whether health checking should be enabled for this specific balancer.
HealthCheck bool
}
-// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{
name: name,
pickerBuilder: pb,
config: config,
}
}
-
-// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
- return &baseBuilder{
- name: name,
- v2PickerBuilder: pb,
- config: config,
- }
-}
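
Given the consolidated builder API above (the V2 variants are gone and NewBalancerBuilder now takes a Config), a custom balancer built on the base package would be wired up roughly as follows. This is a hedged sketch: the firstReadyPickerBuilder and singlePicker types, and the "first_ready_example" policy name, are made up for illustration.

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type singlePicker struct{ sc balancer.SubConn }

func (p singlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

type firstReadyPickerBuilder struct{}

// Build receives only the READY SubConns via PickerBuildInfo and returns a
// picker that always uses the first one it sees.
func (firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	for sc := range info.ReadySCs {
		return singlePicker{sc: sc}
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

func init() {
	// Config is now a required argument; NewBalancerBuilderWithConfig and
	// NewBalancerBuilderV2 no longer exist.
	balancer.Register(base.NewBalancerBuilder("first_ready_example", firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}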
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 0000000..4ecfa1c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+ "google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+ // BalancerAddresses contains the remote load balancer address(es). If
+ // set, overrides any resolver-provided addresses with Type of GRPCLB.
+ BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s. s's
+// data should not be mutated after calling Set.
+func Set(state resolver.State, s *State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(key, s)
+ return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
+func Get(state resolver.State) *State {
+ s, _ := state.Attributes.Value(key).(*State)
+ return s
+}
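
The new state package gives resolvers a typed way to hand grpclb its remote balancer addresses through resolver.State attributes. A small sketch of how a resolver might use it; the address literal and the withBalancerAddrs helper are placeholders, not anything shipped with the package.

package example

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

// withBalancerAddrs attaches a remote load-balancer address to a
// resolver.State before the resolver reports it via cc.UpdateState.
func withBalancerAddrs(backends []resolver.Address) resolver.State {
	s := resolver.State{Addresses: backends}
	return grpclbstate.Set(s, &grpclbstate.State{
		BalancerAddresses: []resolver.Address{{Addr: "10.0.0.1:443", ServerName: "lb.example.com"}},
	})
}

Per the package comments, the *State passed to Set must not be mutated afterwards, and consumers read it back with grpclbstate.Get.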
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index d4d6455..274eb2f 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -33,9 +33,11 @@
// Name is the name of round_robin balancer.
const Name = "round_robin"
+var logger = grpclog.Component("roundrobin")
+
// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
- return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+ return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
}
func init() {
@@ -44,12 +46,12 @@
type rrPickerBuilder struct{}
-func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
- grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+ logger.Infof("roundrobinPicker: Build called with info: %v", info)
if len(info.ReadySCs) == 0 {
- return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
+ return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}
- var scs []balancer.SubConn
+ scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
for sc := range info.ReadySCs {
scs = append(scs, sc)
}
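
With the builder registered under the name "round_robin" as shown above, clients opt into the policy through service config. A brief, hedged usage sketch; the target and insecure credentials are placeholders, and if the roundrobin package is not already linked into the binary, a blank import of google.golang.org/grpc/balancer/roundrobin pulls in its init registration.

package example

import (
	"google.golang.org/grpc"
)

func dialRoundRobin(target string) (*grpc.ClientConn, error) {
	// Select the round_robin LB policy via the default service config.
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin": {}}]}`),
	)
}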
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index f8667a2..f4ea617 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -37,14 +37,20 @@
err error
}
+// exitIdle contains no data and is just a signal sent on the updateCh in
+// ccBalancerWrapper to instruct the balancer to exit idle.
+type exitIdle struct{}
+
// ccBalancerWrapper is a wrapper on top of cc for balancers.
// It implements balancer.ClientConn interface.
type ccBalancerWrapper struct {
- cc *ClientConn
- balancerMu sync.Mutex // synchronizes calls to the balancer
- balancer balancer.Balancer
- scBuffer *buffer.Unbounded
- done *grpcsync.Event
+ cc *ClientConn
+ balancerMu sync.Mutex // synchronizes calls to the balancer
+ balancer balancer.Balancer
+ hasExitIdle bool
+ updateCh *buffer.Unbounded
+ closed *grpcsync.Event
+ done *grpcsync.Event
mu sync.Mutex
subConns map[*acBalancerWrapper]struct{}
@@ -53,12 +59,14 @@
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
ccb := &ccBalancerWrapper{
cc: cc,
- scBuffer: buffer.NewUnbounded(),
+ updateCh: buffer.NewUnbounded(),
+ closed: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
subConns: make(map[*acBalancerWrapper]struct{}),
}
go ccb.watcher()
ccb.balancer = b.Build(ccb, bopts)
+ _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
return ccb
}
@@ -67,39 +75,72 @@
func (ccb *ccBalancerWrapper) watcher() {
for {
select {
- case t := <-ccb.scBuffer.Get():
- ccb.scBuffer.Load()
- if ccb.done.HasFired() {
+ case t := <-ccb.updateCh.Get():
+ ccb.updateCh.Load()
+ if ccb.closed.HasFired() {
break
}
- ccb.balancerMu.Lock()
- su := t.(*scStateUpdate)
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
- } else {
- ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
+ switch u := t.(type) {
+ case *scStateUpdate:
+ ccb.balancerMu.Lock()
+ ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
+ ccb.balancerMu.Unlock()
+ case *acBalancerWrapper:
+ ccb.mu.Lock()
+ if ccb.subConns != nil {
+ delete(ccb.subConns, u)
+ ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
+ }
+ ccb.mu.Unlock()
+ case exitIdle:
+ if ccb.cc.GetState() == connectivity.Idle {
+ if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
+ // We already checked that the balancer implements
+ // ExitIdle before pushing the event to updateCh, but
+ // check conditionally again as defensive programming.
+ ccb.balancerMu.Lock()
+ ei.ExitIdle()
+ ccb.balancerMu.Unlock()
+ }
+ }
+ default:
+ logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
}
- ccb.balancerMu.Unlock()
- case <-ccb.done.Done():
+ case <-ccb.closed.Done():
}
- if ccb.done.HasFired() {
+ if ccb.closed.HasFired() {
+ ccb.balancerMu.Lock()
ccb.balancer.Close()
+ ccb.balancerMu.Unlock()
ccb.mu.Lock()
scs := ccb.subConns
ccb.subConns = nil
ccb.mu.Unlock()
+ ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
+ ccb.done.Fire()
+ // Fire done before removing the addr conns. We can safely unblock
+ // ccb.close and allow the removeAddrConns to happen
+ // asynchronously.
for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
- ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
return
}
}
}
func (ccb *ccBalancerWrapper) close() {
- ccb.done.Fire()
+ ccb.closed.Fire()
+ <-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() bool {
+ if !ccb.hasExitIdle {
+ return false
+ }
+ ccb.updateCh.Put(exitIdle{})
+ return true
}
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
@@ -113,7 +154,7 @@
if sc == nil {
return
}
- ccb.scBuffer.Put(&scStateUpdate{
+ ccb.updateCh.Put(&scStateUpdate{
sc: sc,
state: s,
err: err,
@@ -123,19 +164,13 @@
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.balancerMu.Lock()
defer ccb.balancerMu.Unlock()
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- return ub.UpdateClientConnState(*ccs)
- }
- ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
- return nil
+ return ccb.balancer.UpdateClientConnState(*ccs)
}
func (ccb *ccBalancerWrapper) resolverError(err error) {
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- ccb.balancerMu.Lock()
- ub.ResolverError(err)
- ccb.balancerMu.Unlock()
- }
+ ccb.balancerMu.Lock()
+ defer ccb.balancerMu.Unlock()
+ ccb.balancer.ResolverError(err)
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -160,32 +195,18 @@
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
+ // during switchBalancer() if the old balancer calls RemoveSubConn() in its
+ // Close().
+ ccb.updateCh.Put(sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
- delete(ccb.subConns, acbw)
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-}
-
-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
- // Update picker before updating state. Even though the ordering here does
- // not matter, it can lead to multiple calls of Pick in the common start-up
- // case where we wait for ready and then perform an RPC. If the picker is
- // updated later, we could call the "connecting" picker when the state is
- // updated, and then call the "ready" picker after the picker gets updated.
- ccb.cc.blockingpicker.updatePicker(p)
- ccb.cc.csMgr.updateState(s)
+ acbw.UpdateAddresses(addrs)
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
@@ -199,7 +220,7 @@
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
- ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+ ccb.cc.blockingpicker.updatePicker(s.Picker)
ccb.cc.csMgr.updateState(s.ConnectivityState)
}
@@ -222,7 +243,7 @@
acbw.mu.Lock()
defer acbw.mu.Unlock()
if len(addrs) <= 0 {
- acbw.ac.tearDown(errConnDrain)
+ acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
return
}
if !acbw.ac.tryUpdateAddrs(addrs) {
@@ -237,23 +258,23 @@
acbw.ac.acbw = nil
acbw.ac.mu.Unlock()
acState := acbw.ac.getState()
- acbw.ac.tearDown(errConnDrain)
+ acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
if acState == connectivity.Shutdown {
return
}
- ac, err := cc.newAddrConn(addrs, opts)
+ newAC, err := cc.newAddrConn(addrs, opts)
if err != nil {
- channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+ channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
return
}
- acbw.ac = ac
- ac.mu.Lock()
- ac.acbw = acbw
- ac.mu.Unlock()
+ acbw.ac = newAC
+ newAC.mu.Lock()
+ newAC.acbw = acbw
+ newAC.mu.Unlock()
if acState != connectivity.Idle {
- ac.connect()
+ go newAC.connect()
}
}
}
@@ -261,7 +282,7 @@
func (acbw *acBalancerWrapper) Connect() {
acbw.mu.Lock()
defer acbw.mu.Unlock()
- acbw.ac.connect()
+ go acbw.ac.connect()
}
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
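
The wrapper above only forwards ClientConn.Connect to the balancer when the balancer implements the optional ExitIdler interface; otherwise Connect falls back to connecting all SubConns directly. A minimal, hypothetical implementation of that hook follows; the idleAwareBalancer type and its subConns field are illustrative, and a real balancer would also implement the rest of the Balancer interface.

package example

import "google.golang.org/grpc/balancer"

type idleAwareBalancer struct {
	subConns []balancer.SubConn
	// ... remaining Balancer state and methods (UpdateClientConnState,
	// ResolverError, UpdateSubConnState, Close) omitted in this sketch.
}

// ExitIdle asks every known SubConn to leave IDLE; SubConns in IDLE do not
// reconnect on their own until Connect is called on them.
func (b *idleAwareBalancer) ExitIdle() {
	for _, sc := range b.subConns {
		sc.Connect()
	}
}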
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
deleted file mode 100644
index db04b08..0000000
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-type balancerWrapperBuilder struct {
- b Balancer // The v1 balancer.
-}
-
-func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
- DialCreds: opts.DialCreds,
- Dialer: opts.Dialer,
- })
- _, pickfirst := bwb.b.(*pickFirst)
- bw := &balancerWrapper{
- balancer: bwb.b,
- pickfirst: pickfirst,
- cc: cc,
- targetAddr: opts.Target.Endpoint,
- startCh: make(chan struct{}),
- conns: make(map[resolver.Address]balancer.SubConn),
- connSt: make(map[balancer.SubConn]*scState),
- csEvltr: &balancer.ConnectivityStateEvaluator{},
- state: connectivity.Idle,
- }
- cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
- go bw.lbWatcher()
- return bw
-}
-
-func (bwb *balancerWrapperBuilder) Name() string {
- return "wrapper"
-}
-
-type scState struct {
- addr Address // The v1 address type.
- s connectivity.State
- down func(error)
-}
-
-type balancerWrapper struct {
- balancer Balancer // The v1 balancer.
- pickfirst bool
-
- cc balancer.ClientConn
- targetAddr string // Target without the scheme.
-
- mu sync.Mutex
- conns map[resolver.Address]balancer.SubConn
- connSt map[balancer.SubConn]*scState
- // This channel is closed when handling the first resolver result.
- // lbWatcher blocks until this is closed, to avoid race between
- // - NewSubConn is created, cc wants to notify balancer of state changes;
- // - Build hasn't return, cc doesn't have access to balancer.
- startCh chan struct{}
-
- // To aggregate the connectivity state.
- csEvltr *balancer.ConnectivityStateEvaluator
- state connectivity.State
-}
-
-// lbWatcher watches the Notify channel of the balancer and manages
-// connections accordingly.
-func (bw *balancerWrapper) lbWatcher() {
- <-bw.startCh
- notifyCh := bw.balancer.Notify()
- if notifyCh == nil {
- // There's no resolver in the balancer. Connect directly.
- a := resolver.Address{
- Addr: bw.targetAddr,
- Type: resolver.Backend,
- }
- sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
- } else {
- bw.mu.Lock()
- bw.conns[a] = sc
- bw.connSt[sc] = &scState{
- addr: Address{Addr: bw.targetAddr},
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- return
- }
-
- for addrs := range notifyCh {
- grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
- if bw.pickfirst {
- var (
- oldA resolver.Address
- oldSC balancer.SubConn
- )
- bw.mu.Lock()
- for oldA, oldSC = range bw.conns {
- break
- }
- bw.mu.Unlock()
- if len(addrs) <= 0 {
- if oldSC != nil {
- // Teardown old sc.
- bw.mu.Lock()
- delete(bw.conns, oldA)
- delete(bw.connSt, oldSC)
- bw.mu.Unlock()
- bw.cc.RemoveSubConn(oldSC)
- }
- continue
- }
-
- var newAddrs []resolver.Address
- for _, a := range addrs {
- newAddr := resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend, // All addresses from balancer are all backends.
- ServerName: "",
- Metadata: a.Metadata,
- }
- newAddrs = append(newAddrs, newAddr)
- }
- if oldSC == nil {
- // Create new sc.
- sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
- } else {
- bw.mu.Lock()
- // For pickfirst, there should be only one SubConn, so the
- // address doesn't matter. All states updating (up and down)
- // and picking should all happen on that only SubConn.
- bw.conns[resolver.Address{}] = sc
- bw.connSt[sc] = &scState{
- addr: addrs[0], // Use the first address.
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- } else {
- bw.mu.Lock()
- bw.connSt[oldSC].addr = addrs[0]
- bw.mu.Unlock()
- oldSC.UpdateAddresses(newAddrs)
- }
- } else {
- var (
- add []resolver.Address // Addresses need to setup connections.
- del []balancer.SubConn // Connections need to tear down.
- )
- resAddrs := make(map[resolver.Address]Address)
- for _, a := range addrs {
- resAddrs[resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend, // All addresses from balancer are all backends.
- ServerName: "",
- Metadata: a.Metadata,
- }] = a
- }
- bw.mu.Lock()
- for a := range resAddrs {
- if _, ok := bw.conns[a]; !ok {
- add = append(add, a)
- }
- }
- for a, c := range bw.conns {
- if _, ok := resAddrs[a]; !ok {
- del = append(del, c)
- delete(bw.conns, a)
- // Keep the state of this sc in bw.connSt until its state becomes Shutdown.
- }
- }
- bw.mu.Unlock()
- for _, a := range add {
- sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
- } else {
- bw.mu.Lock()
- bw.conns[a] = sc
- bw.connSt[sc] = &scState{
- addr: resAddrs[a],
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- }
- for _, c := range del {
- bw.cc.RemoveSubConn(c)
- }
- }
- }
-}
-
-func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- scSt, ok := bw.connSt[sc]
- if !ok {
- return
- }
- if s == connectivity.Idle {
- sc.Connect()
- }
- oldS := scSt.s
- scSt.s = s
- if oldS != connectivity.Ready && s == connectivity.Ready {
- scSt.down = bw.balancer.Up(scSt.addr)
- } else if oldS == connectivity.Ready && s != connectivity.Ready {
- if scSt.down != nil {
- scSt.down(errConnClosing)
- }
- }
- sa := bw.csEvltr.RecordTransition(oldS, s)
- if bw.state != sa {
- bw.state = sa
- }
- bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
- if s == connectivity.Shutdown {
- // Remove state for this sc.
- delete(bw.connSt, sc)
- }
-}
-
-func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- select {
- case <-bw.startCh:
- default:
- close(bw.startCh)
- }
- // There should be a resolver inside the balancer.
- // All updates here, if any, are ignored.
-}
-
-func (bw *balancerWrapper) Close() {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- select {
- case <-bw.startCh:
- default:
- close(bw.startCh)
- }
- bw.balancer.Close()
-}
-
-// The picker is the balancerWrapper itself.
-// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
- failfast := true // Default failfast is true.
- if ss, ok := rpcInfoFromContext(info.Ctx); ok {
- failfast = ss.failfast
- }
- a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
- if err != nil {
- return balancer.PickResult{}, toRPCErr(err)
- }
- if p != nil {
- result.Done = func(balancer.DoneInfo) { p() }
- defer func() {
- if err != nil {
- p()
- }
- }()
- }
-
- bw.mu.Lock()
- defer bw.mu.Unlock()
- if bw.pickfirst {
- // Get the first sc in conns.
- for _, result.SubConn = range bw.conns {
- return result, nil
- }
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
- }
- var ok1 bool
- result.SubConn, ok1 = bw.conns[resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend,
- ServerName: "",
- Metadata: a.Metadata,
- }]
- s, ok2 := bw.connSt[result.SubConn]
- if !ok1 || !ok2 {
- // This can only happen due to a race where Get() returned an address
- // that was subsequently removed by Notify. In this case we should
- // retry always.
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
- }
- switch s.s {
- case connectivity.Ready, connectivity.Idle:
- return result, nil
- case connectivity.Shutdown, connectivity.TransientFailure:
- // If the returned sc has been shut down or is in transient failure,
- // return error, and this RPC will fail or wait for another picker (if
- // non-failfast).
- return balancer.PickResult{}, balancer.ErrTransientFailure
- default:
- // For other states (connecting or unknown), the v1 balancer would
- // traditionally wait until ready and then issue the RPC. Returning
- // ErrNoSubConnAvailable will be a slight improvement in that it will
- // allow the balancer to choose another address in case others are
- // connected.
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
- }
-}
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index f393bb6..ed75290 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -1,24 +1,49 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
+
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.14.0
+// source: grpc/binlog/v1/binarylog.proto
-package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+package grpc_binarylog_v1
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import duration "github.com/golang/protobuf/ptypes/duration"
-import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
@@ -54,32 +79,55 @@
GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
)
-var GrpcLogEntry_EventType_name = map[int32]string{
- 0: "EVENT_TYPE_UNKNOWN",
- 1: "EVENT_TYPE_CLIENT_HEADER",
- 2: "EVENT_TYPE_SERVER_HEADER",
- 3: "EVENT_TYPE_CLIENT_MESSAGE",
- 4: "EVENT_TYPE_SERVER_MESSAGE",
- 5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
- 6: "EVENT_TYPE_SERVER_TRAILER",
- 7: "EVENT_TYPE_CANCEL",
-}
-var GrpcLogEntry_EventType_value = map[string]int32{
- "EVENT_TYPE_UNKNOWN": 0,
- "EVENT_TYPE_CLIENT_HEADER": 1,
- "EVENT_TYPE_SERVER_HEADER": 2,
- "EVENT_TYPE_CLIENT_MESSAGE": 3,
- "EVENT_TYPE_SERVER_MESSAGE": 4,
- "EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
- "EVENT_TYPE_SERVER_TRAILER": 6,
- "EVENT_TYPE_CANCEL": 7,
+// Enum value maps for GrpcLogEntry_EventType.
+var (
+ GrpcLogEntry_EventType_name = map[int32]string{
+ 0: "EVENT_TYPE_UNKNOWN",
+ 1: "EVENT_TYPE_CLIENT_HEADER",
+ 2: "EVENT_TYPE_SERVER_HEADER",
+ 3: "EVENT_TYPE_CLIENT_MESSAGE",
+ 4: "EVENT_TYPE_SERVER_MESSAGE",
+ 5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+ 6: "EVENT_TYPE_SERVER_TRAILER",
+ 7: "EVENT_TYPE_CANCEL",
+ }
+ GrpcLogEntry_EventType_value = map[string]int32{
+ "EVENT_TYPE_UNKNOWN": 0,
+ "EVENT_TYPE_CLIENT_HEADER": 1,
+ "EVENT_TYPE_SERVER_HEADER": 2,
+ "EVENT_TYPE_CLIENT_MESSAGE": 3,
+ "EVENT_TYPE_SERVER_MESSAGE": 4,
+ "EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+ "EVENT_TYPE_SERVER_TRAILER": 6,
+ "EVENT_TYPE_CANCEL": 7,
+ }
+)
+
+func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
+ p := new(GrpcLogEntry_EventType)
+ *p = x
+ return p
}
func (x GrpcLogEntry_EventType) String() string {
- return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+
+func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
+}
+
+func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
+ return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
+}
+
+func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
}
// Enumerates the entity that generates the log entry
@@ -91,22 +139,45 @@
GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
)
-var GrpcLogEntry_Logger_name = map[int32]string{
- 0: "LOGGER_UNKNOWN",
- 1: "LOGGER_CLIENT",
- 2: "LOGGER_SERVER",
-}
-var GrpcLogEntry_Logger_value = map[string]int32{
- "LOGGER_UNKNOWN": 0,
- "LOGGER_CLIENT": 1,
- "LOGGER_SERVER": 2,
+// Enum value maps for GrpcLogEntry_Logger.
+var (
+ GrpcLogEntry_Logger_name = map[int32]string{
+ 0: "LOGGER_UNKNOWN",
+ 1: "LOGGER_CLIENT",
+ 2: "LOGGER_SERVER",
+ }
+ GrpcLogEntry_Logger_value = map[string]int32{
+ "LOGGER_UNKNOWN": 0,
+ "LOGGER_CLIENT": 1,
+ "LOGGER_SERVER": 2,
+ }
+)
+
+func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
+ p := new(GrpcLogEntry_Logger)
+ *p = x
+ return p
}
func (x GrpcLogEntry_Logger) String() string {
- return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+
+func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
+ return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
+}
+
+func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
}
type Address_Type int32
@@ -122,30 +193,57 @@
Address_TYPE_UNIX Address_Type = 3
)
-var Address_Type_name = map[int32]string{
- 0: "TYPE_UNKNOWN",
- 1: "TYPE_IPV4",
- 2: "TYPE_IPV6",
- 3: "TYPE_UNIX",
-}
-var Address_Type_value = map[string]int32{
- "TYPE_UNKNOWN": 0,
- "TYPE_IPV4": 1,
- "TYPE_IPV6": 2,
- "TYPE_UNIX": 3,
+// Enum value maps for Address_Type.
+var (
+ Address_Type_name = map[int32]string{
+ 0: "TYPE_UNKNOWN",
+ 1: "TYPE_IPV4",
+ 2: "TYPE_IPV6",
+ 3: "TYPE_UNIX",
+ }
+ Address_Type_value = map[string]int32{
+ "TYPE_UNKNOWN": 0,
+ "TYPE_IPV4": 1,
+ "TYPE_IPV6": 2,
+ "TYPE_UNIX": 3,
+ }
+)
+
+func (x Address_Type) Enum() *Address_Type {
+ p := new(Address_Type)
+ *p = x
+ return p
}
func (x Address_Type) String() string {
- return proto.EnumName(Address_Type_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+
+func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
+}
+
+func (Address_Type) Type() protoreflect.EnumType {
+ return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
+}
+
+func (x Address_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Address_Type.Descriptor instead.
func (Address_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
}
// Log entry we store in binary logs
type GrpcLogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The timestamp of the binary log message
- Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
// from an unset value.
// Each call may have several log entries, they will all have the same call_id.
@@ -158,11 +256,11 @@
// durability or ordering is not guaranteed.
SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
- Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
+ Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum
// The logger uses one of the following fields to record the payload,
// according to the type of the log entry.
//
- // Types that are valid to be assigned to Payload:
+ // Types that are assignable to Payload:
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message
@@ -175,71 +273,125 @@
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
// the case of trailers-only. On server side, peer is always
// logged on EVENT_TYPE_CLIENT_HEADER.
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
}
-func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
-func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
-func (*GrpcLogEntry) ProtoMessage() {}
+func (x *GrpcLogEntry) Reset() {
+ *x = GrpcLogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcLogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcLogEntry) ProtoMessage() {}
+
+func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
-}
-func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
-}
-func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
-}
-func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
-}
-func (m *GrpcLogEntry) XXX_Size() int {
- return xxx_messageInfo_GrpcLogEntry.Size(m)
-}
-func (m *GrpcLogEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
}
-var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
-
-func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
-func (m *GrpcLogEntry) GetCallId() uint64 {
- if m != nil {
- return m.CallId
+func (x *GrpcLogEntry) GetCallId() uint64 {
+ if x != nil {
+ return x.CallId
}
return 0
}
-func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
- if m != nil {
- return m.SequenceIdWithinCall
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+ if x != nil {
+ return x.SequenceIdWithinCall
}
return 0
}
-func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
- if m != nil {
- return m.Type
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+ if x != nil {
+ return x.Type
}
return GrpcLogEntry_EVENT_TYPE_UNKNOWN
}
-func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
- if m != nil {
- return m.Logger
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+ if x != nil {
+ return x.Logger
}
return GrpcLogEntry_LOGGER_UNKNOWN
}
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+ return x.ClientHeader
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+ return x.ServerHeader
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+ return x.Message
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+ return x.Trailer
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+ if x != nil {
+ return x.PayloadTruncated
+ }
+ return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
type isGrpcLogEntry_Payload interface {
isGrpcLogEntry_Payload()
}
@@ -253,6 +405,7 @@
}
type GrpcLogEntry_Message struct {
+ // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
}
@@ -268,168 +421,11 @@
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
- if m != nil {
- return m.Payload
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
- if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
- return x.ClientHeader
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
- if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
- return x.ServerHeader
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetMessage() *Message {
- if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
- return x.Message
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetTrailer() *Trailer {
- if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
- return x.Trailer
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetPayloadTruncated() bool {
- if m != nil {
- return m.PayloadTruncated
- }
- return false
-}
-
-func (m *GrpcLogEntry) GetPeer() *Address {
- if m != nil {
- return m.Peer
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
- (*GrpcLogEntry_ClientHeader)(nil),
- (*GrpcLogEntry_ServerHeader)(nil),
- (*GrpcLogEntry_Message)(nil),
- (*GrpcLogEntry_Trailer)(nil),
- }
-}
-
-func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*GrpcLogEntry)
- // payload
- switch x := m.Payload.(type) {
- case *GrpcLogEntry_ClientHeader:
- b.EncodeVarint(6<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ClientHeader); err != nil {
- return err
- }
- case *GrpcLogEntry_ServerHeader:
- b.EncodeVarint(7<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ServerHeader); err != nil {
- return err
- }
- case *GrpcLogEntry_Message:
- b.EncodeVarint(8<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Message); err != nil {
- return err
- }
- case *GrpcLogEntry_Trailer:
- b.EncodeVarint(9<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Trailer); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
- }
- return nil
-}
-
-func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*GrpcLogEntry)
- switch tag {
- case 6: // payload.client_header
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ClientHeader)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_ClientHeader{msg}
- return true, err
- case 7: // payload.server_header
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ServerHeader)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_ServerHeader{msg}
- return true, err
- case 8: // payload.message
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Message)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_Message{msg}
- return true, err
- case 9: // payload.trailer
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Trailer)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_Trailer{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*GrpcLogEntry)
- // payload
- switch x := m.Payload.(type) {
- case *GrpcLogEntry_ClientHeader:
- s := proto.Size(x.ClientHeader)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_ServerHeader:
- s := proto.Size(x.ServerHeader)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_Message:
- s := proto.Size(x.Message)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_Trailer:
- s := proto.Size(x.Trailer)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
type ClientHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The name of the RPC method, which looks something like:
@@ -443,104 +439,122 @@
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
// the RPC timeout
- Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
}
-func (m *ClientHeader) Reset() { *m = ClientHeader{} }
-func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
-func (*ClientHeader) ProtoMessage() {}
+func (x *ClientHeader) Reset() {
+ *x = ClientHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClientHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
func (*ClientHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
-}
-func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
-}
-func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
-}
-func (dst *ClientHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClientHeader.Merge(dst, src)
-}
-func (m *ClientHeader) XXX_Size() int {
- return xxx_messageInfo_ClientHeader.Size(m)
-}
-func (m *ClientHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
}
-var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
-
-func (m *ClientHeader) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
+func (x *ClientHeader) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
}
return nil
}
-func (m *ClientHeader) GetMethodName() string {
- if m != nil {
- return m.MethodName
+func (x *ClientHeader) GetMethodName() string {
+ if x != nil {
+ return x.MethodName
}
return ""
}
-func (m *ClientHeader) GetAuthority() string {
- if m != nil {
- return m.Authority
+func (x *ClientHeader) GetAuthority() string {
+ if x != nil {
+ return x.Authority
}
return ""
}
-func (m *ClientHeader) GetTimeout() *duration.Duration {
- if m != nil {
- return m.Timeout
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
}
return nil
}
type ServerHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
}
-func (m *ServerHeader) Reset() { *m = ServerHeader{} }
-func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
-func (*ServerHeader) ProtoMessage() {}
+func (x *ServerHeader) Reset() {
+ *x = ServerHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
func (*ServerHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
-}
-func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
-}
-func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
-}
-func (dst *ServerHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerHeader.Merge(dst, src)
-}
-func (m *ServerHeader) XXX_Size() int {
- return xxx_messageInfo_ServerHeader.Size(m)
-}
-func (m *ServerHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerHeader.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
}
-var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
-
-func (m *ServerHeader) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
+func (x *ServerHeader) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
}
return nil
}
type Trailer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The gRPC status code.
@@ -550,110 +564,124 @@
StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
// The value of the 'grpc-status-details-bin' metadata key. If
// present, this is always an encoded 'google.rpc.Status' message.
- StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
}
-func (m *Trailer) Reset() { *m = Trailer{} }
-func (m *Trailer) String() string { return proto.CompactTextString(m) }
-func (*Trailer) ProtoMessage() {}
+func (x *Trailer) Reset() {
+ *x = Trailer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Trailer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Trailer) ProtoMessage() {}
+
+func (x *Trailer) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
func (*Trailer) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
-}
-func (m *Trailer) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Trailer.Unmarshal(m, b)
-}
-func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
-}
-func (dst *Trailer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Trailer.Merge(dst, src)
-}
-func (m *Trailer) XXX_Size() int {
- return xxx_messageInfo_Trailer.Size(m)
-}
-func (m *Trailer) XXX_DiscardUnknown() {
- xxx_messageInfo_Trailer.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
}
-var xxx_messageInfo_Trailer proto.InternalMessageInfo
-
-func (m *Trailer) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
+func (x *Trailer) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
}
return nil
}
-func (m *Trailer) GetStatusCode() uint32 {
- if m != nil {
- return m.StatusCode
+func (x *Trailer) GetStatusCode() uint32 {
+ if x != nil {
+ return x.StatusCode
}
return 0
}
-func (m *Trailer) GetStatusMessage() string {
- if m != nil {
- return m.StatusMessage
+func (x *Trailer) GetStatusMessage() string {
+ if x != nil {
+ return x.StatusMessage
}
return ""
}
-func (m *Trailer) GetStatusDetails() []byte {
- if m != nil {
- return m.StatusDetails
+func (x *Trailer) GetStatusDetails() []byte {
+ if x != nil {
+ return x.StatusDetails
}
return nil
}
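
As context for the Trailer message above: status_details is documented as an always-encoded google.rpc.Status. Below is a minimal decoding sketch, assuming the regenerated genproto status package and a hypothetical helper package; none of it is part of the vendored diff.

// Hypothetical helper package, shown for illustration only.
package binlogutil

import (
	spb "google.golang.org/genproto/googleapis/rpc/status"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
)

// decodeStatusDetails unmarshals Trailer.StatusDetails, which the generated
// comment states is always an encoded google.rpc.Status message.
func decodeStatusDetails(t *binlogpb.Trailer) (*spb.Status, error) {
	st := new(spb.Status)
	if err := proto.Unmarshal(t.GetStatusDetails(), st); err != nil {
		return nil, err
	}
	return st, nil
}
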
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
type Message struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Length of the message. It may not be the same as the length of the
// data field, as the logging payload can be truncated or omitted.
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
// May be truncated or omitted.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
+func (x *Message) Reset() {
+ *x = Message{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Message.Unmarshal(m, b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-}
-func (dst *Message) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message.Merge(dst, src)
-}
-func (m *Message) XXX_Size() int {
- return xxx_messageInfo_Message.Size(m)
-}
-func (m *Message) XXX_DiscardUnknown() {
- xxx_messageInfo_Message.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
}
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-func (m *Message) GetLength() uint32 {
- if m != nil {
- return m.Length
+func (x *Message) GetLength() uint32 {
+ if x != nil {
+ return x.Length
}
return 0
}
-func (m *Message) GetData() []byte {
- if m != nil {
- return m.Data
+func (x *Message) GetData() []byte {
+ if x != nil {
+ return x.Data
}
return nil
}
@@ -680,221 +708,480 @@
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
- Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
}
-func (m *Metadata) Reset() { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage() {}
+func (x *Metadata) Reset() {
+ *x = Metadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
func (*Metadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metadata.Unmarshal(m, b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-}
-func (dst *Metadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metadata.Merge(dst, src)
-}
-func (m *Metadata) XXX_Size() int {
- return xxx_messageInfo_Metadata.Size(m)
-}
-func (m *Metadata) XXX_DiscardUnknown() {
- xxx_messageInfo_Metadata.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
}
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func (m *Metadata) GetEntry() []*MetadataEntry {
- if m != nil {
- return m.Entry
+func (x *Metadata) GetEntry() []*MetadataEntry {
+ if x != nil {
+ return x.Entry
}
return nil
}
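
The Metadata/MetadataEntry pair above is a plain repeated key/value list. A small illustrative sketch of assembling one follows; the helper is hypothetical and, for brevity, assumed to sit alongside the generated types.

package grpc_binarylog_v1 // hypothetical placement, for illustration only

// toBinlogMetadata assembles a Metadata message from raw key/value pairs;
// values are raw bytes, matching MetadataEntry.Value. Not generated code.
func toBinlogMetadata(pairs map[string][]byte) *Metadata {
	md := &Metadata{}
	for k, v := range pairs {
		md.Entry = append(md.Entry, &MetadataEntry{Key: k, Value: v})
	}
	return md
}
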
// A metadata key value pair
type MetadataEntry struct {
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
-func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
-func (*MetadataEntry) ProtoMessage() {}
+func (x *MetadataEntry) Reset() {
+ *x = MetadataEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetadataEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataEntry) ProtoMessage() {}
+
+func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
func (*MetadataEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
-}
-func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
-}
-func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
-}
-func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetadataEntry.Merge(dst, src)
-}
-func (m *MetadataEntry) XXX_Size() int {
- return xxx_messageInfo_MetadataEntry.Size(m)
-}
-func (m *MetadataEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
}
-var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
-
-func (m *MetadataEntry) GetKey() string {
- if m != nil {
- return m.Key
+func (x *MetadataEntry) GetKey() string {
+ if x != nil {
+ return x.Key
}
return ""
}
-func (m *MetadataEntry) GetValue() []byte {
- if m != nil {
- return m.Value
+func (x *MetadataEntry) GetValue() []byte {
+ if x != nil {
+ return x.Value
}
return nil
}
// Address information
type Address struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
// only for TYPE_IPV4 and TYPE_IPV6
- IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
}
-func (m *Address) Reset() { *m = Address{} }
-func (m *Address) String() string { return proto.CompactTextString(m) }
-func (*Address) ProtoMessage() {}
+func (x *Address) Reset() {
+ *x = Address{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Address) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
func (*Address) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
-}
-func (m *Address) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Address.Unmarshal(m, b)
-}
-func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Address.Marshal(b, m, deterministic)
-}
-func (dst *Address) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Address.Merge(dst, src)
-}
-func (m *Address) XXX_Size() int {
- return xxx_messageInfo_Address.Size(m)
-}
-func (m *Address) XXX_DiscardUnknown() {
- xxx_messageInfo_Address.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
}
-var xxx_messageInfo_Address proto.InternalMessageInfo
-
-func (m *Address) GetType() Address_Type {
- if m != nil {
- return m.Type
+func (x *Address) GetType() Address_Type {
+ if x != nil {
+ return x.Type
}
return Address_TYPE_UNKNOWN
}
-func (m *Address) GetAddress() string {
- if m != nil {
- return m.Address
+func (x *Address) GetAddress() string {
+ if x != nil {
+ return x.Address
}
return ""
}
-func (m *Address) GetIpPort() uint32 {
- if m != nil {
- return m.IpPort
+func (x *Address) GetIpPort() uint32 {
+ if x != nil {
+ return x.IpPort
}
return 0
}
-func init() {
- proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
- proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
- proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
- proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
- proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
- proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
- proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
- proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
- proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
- proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
- proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
+var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
+
+var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
+ 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+ 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+ 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
+ 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
+ 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
+ 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+ 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
+ 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+ 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
+ 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+ 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
+ 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
+ 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
+ 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+ 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
+ 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
+ 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
+ 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
+ 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+ 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
+ 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
+ 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
+ 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+ 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
+ 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
+ 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
+ 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
+ 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
+ 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
+ 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
+ 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
+ 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
+ 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+ 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+ 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
+ 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+ 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
+ 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+ 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
+ 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+ 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
+ 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
+ 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
+ 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
+ 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
+ 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
+ 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
+ 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+ 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
+ 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
+ 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
+ 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
+ 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
-func init() {
- proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
+var (
+ file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
+ file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
+)
+
+func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
+ file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
+ file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
+ })
+ return file_grpc_binlog_v1_binarylog_proto_rawDescData
}
-var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
- // 900 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
- 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
- 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
- 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
- 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
- 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
- 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
- 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
- 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
- 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
- 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
- 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
- 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
- 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
- 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
- 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
- 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
- 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
- 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
- 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
- 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
- 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
- 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
- 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
- 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
- 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
- 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
- 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
- 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
- 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
- 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
- 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
- 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
- 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
- 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
- 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
- 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
- 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
- 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
- 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
- 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
- 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
- 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
- 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
- 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
- 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
- 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
- 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
- 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
- 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
- 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
- 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
- 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
- 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
- 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
- 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
- 0xd4, 0x07, 0x00, 0x00,
+var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
+ (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
+ (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
+ (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry
+ (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader
+ (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader
+ (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer
+ (*Message)(nil), // 7: grpc.binarylog.v1.Message
+ (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata
+ (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry
+ (*Address)(nil), // 10: grpc.binarylog.v1.Address
+ (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 12: google.protobuf.Duration
+}
+var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
+ 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
+ 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
+ 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
+ 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
+ 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
+ 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
+ 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
+ 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
+ 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+ 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
+ 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+ 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
+ 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
+ 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
+ 14, // [14:14] is the sub-list for method output_type
+ 14, // [14:14] is the sub-list for method input_type
+ 14, // [14:14] is the sub-list for extension type_name
+ 14, // [14:14] is the sub-list for extension extendee
+ 0, // [0:14] is the sub-list for field type_name
+}
+
+func init() { file_grpc_binlog_v1_binarylog_proto_init() }
+func file_grpc_binlog_v1_binarylog_proto_init() {
+ if File_grpc_binlog_v1_binarylog_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcLogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClientHeader); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerHeader); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Trailer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetadataEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Address); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*GrpcLogEntry_ClientHeader)(nil),
+ (*GrpcLogEntry_ServerHeader)(nil),
+ (*GrpcLogEntry_Message)(nil),
+ (*GrpcLogEntry_Trailer)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes,
+ DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
+ EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes,
+ MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes,
+ }.Build()
+ File_grpc_binlog_v1_binarylog_proto = out.File
+ file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
+ file_grpc_binlog_v1_binarylog_proto_goTypes = nil
+ file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 0740693..28f09dc 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -23,7 +23,7 @@
"errors"
"fmt"
"math"
- "net"
+ "net/url"
"reflect"
"strings"
"sync"
@@ -38,7 +38,7 @@
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/grpcutil"
+ iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -48,6 +48,7 @@
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
_ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+ _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
)
const (
@@ -68,8 +69,6 @@
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
- // errBalancerClosed indicates that the balancer is closed.
- errBalancerClosed = errors.New("grpc: balancer is closed")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@@ -84,13 +83,13 @@
// errTransportCredsAndBundle indicates that creds bundle is used together
// with other individual Transport Credentials.
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
- // errTransportCredentialsMissing indicates that users want to transmit security
- // information (e.g., OAuth2 token) which requires secure connection on an insecure
- // connection.
+ // errNoTransportCredsInBundle indicates that the configured creds bundle
+ // returned nil transport credentials.
+ errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+ // errTransportCredentialsMissing indicates that users want to transmit
+ // security information (e.g., OAuth2 token), which requires a secure
+ // connection, over an insecure connection.
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
- // errCredentialsConflict indicates that grpc.WithTransportCredentials()
- // and grpc.WithInsecure() are both called for a connection.
- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
)
const (
@@ -106,6 +105,17 @@
return DialContext(context.Background(), target, opts...)
}
+type defaultConfigSelector struct {
+ sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+ return &iresolver.RPCConfig{
+ Context: rpcInfo.Context,
+ MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+ }, nil
+}
+
// DialContext creates a client connection to the given target. By default, it's
// a non-blocking dial (the function won't wait for connections to be
// established, and connecting happens in the background). To make it a blocking
@@ -133,6 +143,7 @@
firstResolveEvent: grpcsync.NewEvent(),
}
cc.retryThrottler.Store((*retryThrottler)(nil))
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
for _, opt := range opts {
@@ -151,32 +162,35 @@
if channelz.IsOn() {
if cc.dopts.channelzParentID != 0 {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
- channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{
+ channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Channel Created",
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
},
})
} else {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
- channelz.Info(cc.channelzID, "Channel Created")
+ channelz.Info(logger, cc.channelzID, "Channel Created")
}
cc.csMgr.channelzID = cc.channelzID
}
- if !cc.dopts.insecure {
- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
- return nil, errNoTransportSecurity
- }
- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
- return nil, errTransportCredsAndBundle
- }
- } else {
- if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
- return nil, errCredentialsConflict
- }
+ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+ return nil, errNoTransportSecurity
+ }
+ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+ return nil, errTransportCredsAndBundle
+ }
+ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+ return nil, errNoTransportCredsInBundle
+ }
+ transportCreds := cc.dopts.copts.TransportCredentials
+ if transportCreds == nil {
+ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+ }
+ if transportCreds.Info().SecurityProtocol == "insecure" {
for _, cd := range cc.dopts.copts.PerRPCCredentials {
if cd.RequireTransportSecurity() {
return nil, errTransportCredentialsMissing
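
With the branch on cc.dopts.insecure removed above, DialContext now always requires transport credentials or a credentials bundle. A minimal, hypothetical caller under the new rules is sketched below; the address and TLS config are assumptions, while the gRPC calls themselves are stable API.

// Hypothetical usage sketch: a blocking, deadline-bounded dial with explicit
// TLS transport credentials. "example.com:443" is an assumed address.
package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "example.com:443",
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
		grpc.WithBlock(), // block until the channel is Ready or ctx expires
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
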
@@ -193,16 +207,6 @@
}
cc.mkp = cc.dopts.copts.KeepaliveParams
- if cc.dopts.copts.Dialer == nil {
- cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
- network, addr := parseDialTarget(addr)
- return (&net.Dialer{}).DialContext(ctx, network, addr)
- }
- if cc.dopts.withProxy {
- cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer)
- }
- }
-
if cc.dopts.copts.UserAgent != "" {
cc.dopts.copts.UserAgent += " " + grpcUA
} else {
@@ -217,7 +221,14 @@
defer func() {
select {
case <-ctx.Done():
- conn, err = nil, ctx.Err()
+ switch {
+ case ctx.Err() == err:
+ conn = nil
+ case err == nil || !cc.dopts.returnLastError:
+ conn, err = nil, ctx.Err()
+ default:
+ conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+ }
default:
}
}()
@@ -229,6 +240,7 @@
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = &sc
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
scSet = true
}
default:
@@ -239,34 +251,15 @@
}
// Determine the resolver to use.
- cc.parsedTarget = grpcutil.ParseTarget(cc.target)
- channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
- resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
- if resolverBuilder == nil {
- // If resolver builder is still nil, the parsed target's scheme is
- // not registered. Fallback to default resolver and set Endpoint to
- // the original target.
- channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
- cc.parsedTarget = resolver.Target{
- Scheme: resolver.GetDefaultScheme(),
- Endpoint: target,
- }
- resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
- if resolverBuilder == nil {
- return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
- }
+ resolverBuilder, err := cc.parseTargetAndFindResolver()
+ if err != nil {
+ return nil, err
}
-
- creds := cc.dopts.copts.TransportCredentials
- if creds != nil && creds.Info().ServerName != "" {
- cc.authority = creds.Info().ServerName
- } else if cc.dopts.insecure && cc.dopts.authority != "" {
- cc.authority = cc.dopts.authority
- } else {
- // Use endpoint from "scheme://authority/endpoint" as the default
- // authority for ClientConn.
- cc.authority = cc.parsedTarget.Endpoint
+ cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
+ if err != nil {
+ return nil, err
}
+ channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
if cc.dopts.scChan != nil && !scSet {
// Blocking wait for the initial service config.
@@ -274,6 +267,7 @@
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = &sc
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
}
case <-ctx.Done():
return nil, ctx.Err()
@@ -291,6 +285,8 @@
DialCreds: credsClone,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
+ Authority: cc.authority,
+ CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
}
@@ -307,11 +303,12 @@
// A blocking dial blocks until the clientConn is ready.
if cc.dopts.block {
for {
+ cc.Connect()
s := cc.GetState()
if s == connectivity.Ready {
break
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
- if err = cc.blockingpicker.connectionError(); err != nil {
+ if err = cc.connectionError(); err != nil {
terr, ok := err.(interface {
Temporary() bool
})
@@ -322,6 +319,9 @@
}
if !cc.WaitForStateChange(ctx, s) {
// ctx got timeout or canceled.
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+ return nil, err
+ }
return nil, ctx.Err()
}
}
@@ -414,7 +414,7 @@
return
}
csm.state = state
- channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state)
+ channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
if csm.notifyChan != nil {
// There are other goroutines waiting on this channel.
close(csm.notifyChan)
@@ -476,6 +476,8 @@
balancerBuildOpts balancer.BuildOptions
blockingpicker *pickerWrapper
+ safeConfigSelector iresolver.SafeConfigSelector
+
mu sync.RWMutex
resolverWrapper *ccResolverWrapper
sc *ServiceConfig
@@ -490,11 +492,18 @@
channelzID int64 // channelz unique identification number
czData *channelzData
+
+ lceMu sync.Mutex // protects lastConnectionError
+ lastConnectionError error
}
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in the former case and false in the latter.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -509,11 +518,34 @@
}
// GetState returns the connectivity.State of ClientConn.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle. Does not wait for the connection attempts to begin
+// before returning.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() {
+ return
+ }
+ for ac := range cc.conns {
+ go ac.connect()
+ }
+}
+
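
Since Connect is new in this update, a short hypothetical sketch of pairing it with a non-blocking Dial follows; the helper name and parameters are assumptions, not part of the diff.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialAndConnect is a hypothetical helper: Dial without WithBlock returns an
// idle ClientConn immediately, and Connect then starts connection attempts
// in the background without waiting for them to finish.
func dialAndConnect(target string, creds credentials.TransportCredentials) (*grpc.ClientConn, error) {
	conn, err := grpc.Dial(target, grpc.WithTransportCredentials(creds))
	if err != nil {
		return nil, err
	}
	conn.Connect()
	return conn, nil
}
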
func (cc *ClientConn) scWatcher() {
for {
select {
@@ -525,6 +557,7 @@
// TODO: load balance policy runtime change is ignored.
// We may revisit this decision in the future.
cc.sc = &sc
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
cc.mu.Unlock()
case <-cc.ctx.Done():
return
@@ -563,13 +596,13 @@
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
if cc.sc != nil {
- cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+ cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs)
return
}
if cc.dopts.defaultServiceConfig != nil {
- cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+ cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs)
} else {
- cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+ cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs)
}
}
@@ -600,13 +633,24 @@
}
var ret error
- if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+ if cc.dopts.disableServiceConfig {
+ channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+ cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ } else if s.ServiceConfig == nil {
cc.maybeApplyDefaultServiceConfig(s.Addresses)
// TODO: do we need to apply a failing LB policy if there is no
// default, per the error handling design?
} else {
if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
- cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+ configSelector := iresolver.GetConfigSelector(s)
+ if configSelector != nil {
+ if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
+ channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector")
+ }
+ } else {
+ configSelector = &defaultConfigSelector{sc}
+ }
+ cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
} else {
ret = balancer.ErrBadResolverState
if cc.balancerWrapper == nil {
@@ -616,6 +660,7 @@
} else {
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
}
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
cc.blockingpicker.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
cc.mu.Unlock()
@@ -664,22 +709,27 @@
return
}
- channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name)
+ channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
if cc.dopts.balancerBuilder != nil {
- channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
+ channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
return
}
if cc.balancerWrapper != nil {
+ // Don't hold cc.mu while closing the balancers. The balancers may call
+ // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex
+ // would cause a deadlock in that case.
+ cc.mu.Unlock()
cc.balancerWrapper.close()
+ cc.mu.Lock()
}
builder := balancer.Get(name)
if builder == nil {
- channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
- channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
+ channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
+ channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder()
} else {
- channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name)
+ channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
}
cc.curBalancerName = builder.Name()
@@ -720,12 +770,12 @@
}
if channelz.IsOn() {
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
- channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
+ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Created",
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
},
})
}
@@ -759,7 +809,11 @@
}
// Target returns the target string of the ClientConn.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func (cc *ClientConn) Target() string {
return cc.target
}
@@ -795,8 +849,7 @@
ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
- // Start a goroutine connecting to the server asynchronously.
- go ac.resetTransport()
+ ac.resetTransport()
return nil
}
@@ -818,7 +871,7 @@
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
- channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+ channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
@@ -833,12 +886,13 @@
// ac.state is Ready, try to find the connected address.
var curAddrFound bool
for _, a := range addrs {
+ a.ServerName = ac.cc.getServerName(a)
if reflect.DeepEqual(ac.curAddr, a) {
curAddrFound = true
break
}
}
- channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+ channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
if curAddrFound {
ac.addrs = addrs
}
@@ -846,26 +900,53 @@
return curAddrFound
}
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+ if cc.dopts.authority != "" {
+ return cc.dopts.authority
+ }
+ if addr.ServerName != "" {
+ return addr.ServerName
+ }
+ return cc.authority
+}
+
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+ if sc == nil {
+ return MethodConfig{}
+ }
+ if m, ok := sc.Methods[method]; ok {
+ return m
+ }
+ i := strings.LastIndex(method, "/")
+ if m, ok := sc.Methods[method[:i+1]]; ok {
+ return m
+ }
+ return sc.Methods[""]
+}
+
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
-// If there isn't an exact match for the input method, we look for the default config
-// under the service (i.e /service/). If there is a default MethodConfig for
-// the service, we return it.
+// If there isn't an exact match for the input method, we look for the service's default
+// config under the service (i.e. /service/) and then for the default for all services (the empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock()
defer cc.mu.RUnlock()
- if cc.sc == nil {
- return MethodConfig{}
- }
- m, ok := cc.sc.Methods[method]
- if !ok {
- i := strings.LastIndex(method, "/")
- m = cc.sc.Methods[method[:i+1]]
- }
- return m
+ return getMethodConfig(cc.sc, method)
}
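
The lookup order implemented by getMethodConfig above (exact method key, then the per-service default, then the global default) shows up in the shape of a service config. The JSON below is a hypothetical example with placeholder service and method names, of the kind that could be supplied via grpc.WithDefaultServiceConfig:

    // Keys consulted for the method "/echo.Echo/UnaryEcho", in order:
    //   exact match     -> "/echo.Echo/UnaryEcho"
    //   service default -> "/echo.Echo/"
    //   global default  -> ""
    const exampleServiceConfig = `{
      "methodConfig": [
        { "name": [ { "service": "echo.Echo", "method": "UnaryEcho" } ], "waitForReady": true },
        { "name": [ { "service": "echo.Echo" } ], "timeout": "1s" },
        { "name": [ {} ], "timeout": "5s" }
      ]
    }`
    // e.g. grpc.Dial(target, grpc.WithDefaultServiceConfig(exampleServiceConfig), ...)
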
func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
@@ -888,12 +969,15 @@
return t, done, nil
}
-func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
if sc == nil {
// should never reach here.
return
}
cc.sc = sc
+ if configSelector != nil {
+ cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+ }
if cc.sc.retryThrottling != nil {
newThrottler := &retryThrottler{
@@ -957,7 +1041,10 @@
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
conns := cc.conns
@@ -988,12 +1075,12 @@
cc.blockingpicker.close()
- if rWrapper != nil {
- rWrapper.close()
- }
if bWrapper != nil {
bWrapper.close()
}
+ if rWrapper != nil {
+ rWrapper.close()
+ }
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
@@ -1001,15 +1088,15 @@
if channelz.IsOn() {
ted := &channelz.TraceEventDesc{
Desc: "Channel Deleted",
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != 0 {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
}
}
- channelz.AddTraceEvent(cc.channelzID, 0, ted)
+ channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
// the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(cc.channelzID)
@@ -1053,7 +1140,7 @@
return
}
ac.state = s
- channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s)
+ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
@@ -1072,112 +1159,86 @@
}
func (ac *addrConn) resetTransport() {
- for i := 0; ; i++ {
- if i > 0 {
- ac.cc.resolveNow(resolver.ResolveNowOptions{})
- }
-
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
-
- addrs := ac.addrs
- backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
- // This will be the duration that dial gets to finish.
- dialDuration := minConnectTimeout
- if ac.dopts.minConnectTimeout != nil {
- dialDuration = ac.dopts.minConnectTimeout()
- }
-
- if dialDuration < backoffFor {
- // Give dial more time as we keep failing to connect.
- dialDuration = backoffFor
- }
- // We can potentially spend all the time trying the first address, and
- // if the server accepts the connection and then hangs, the following
- // addresses will never be tried.
- //
- // The spec doesn't mention what should be done for multiple addresses.
- // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
- connectDeadline := time.Now().Add(dialDuration)
-
- ac.updateConnectivityState(connectivity.Connecting, nil)
- ac.transport = nil
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
-
- newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
- if err != nil {
- // After exhausting all addresses, the addrConn enters
- // TRANSIENT_FAILURE.
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
- ac.updateConnectivityState(connectivity.TransientFailure, err)
-
- // Backoff.
- b := ac.resetBackoff
- ac.mu.Unlock()
-
- timer := time.NewTimer(backoffFor)
- select {
- case <-timer.C:
- ac.mu.Lock()
- ac.backoffIdx++
- ac.mu.Unlock()
- case <-b:
- timer.Stop()
- case <-ac.ctx.Done():
- timer.Stop()
- return
- }
- continue
- }
-
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- newTr.Close()
- return
- }
- ac.curAddr = addr
- ac.transport = newTr
- ac.backoffIdx = 0
-
- hctx, hcancel := context.WithCancel(ac.ctx)
- ac.startHealthCheck(hctx)
- ac.mu.Unlock()
-
- // Block until the created transport is down. And when this happens,
- // we restart from the top of the addr list.
- <-reconnect.Done()
- hcancel()
- // restart connecting - the top of the loop will set state to
- // CONNECTING. This is against the current connectivity semantics doc,
- // however it allows for graceful behavior for RPCs not yet dispatched
- // - unfortunate timing would otherwise lead to the RPC failing even
- // though the TRANSIENT_FAILURE state (called for by the doc) would be
- // instantaneous.
- //
- // Ideally we should transition to Idle here and block until there is
- // RPC activity that leads to the balancer requesting a reconnect of
- // the associated SubConn.
+ return
}
+
+ addrs := ac.addrs
+ backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+ // This will be the duration that dial gets to finish.
+ dialDuration := minConnectTimeout
+ if ac.dopts.minConnectTimeout != nil {
+ dialDuration = ac.dopts.minConnectTimeout()
+ }
+
+ if dialDuration < backoffFor {
+ // Give dial more time as we keep failing to connect.
+ dialDuration = backoffFor
+ }
+ // We can potentially spend all the time trying the first address, and
+ // if the server accepts the connection and then hangs, the following
+ // addresses will never be tried.
+ //
+ // The spec doesn't mention what should be done for multiple addresses.
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+ connectDeadline := time.Now().Add(dialDuration)
+
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ ac.mu.Unlock()
+
+ if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
+ // After exhausting all addresses, the addrConn enters
+ // TRANSIENT_FAILURE.
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return
+ }
+ ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+ // Backoff.
+ b := ac.resetBackoff
+ ac.mu.Unlock()
+
+ timer := time.NewTimer(backoffFor)
+ select {
+ case <-timer.C:
+ ac.mu.Lock()
+ ac.backoffIdx++
+ ac.mu.Unlock()
+ case <-b:
+ timer.Stop()
+ case <-ac.ctx.Done():
+ timer.Stop()
+ return
+ }
+
+ ac.mu.Lock()
+ if ac.state != connectivity.Shutdown {
+ ac.updateConnectivityState(connectivity.Idle, err)
+ }
+ ac.mu.Unlock()
+ return
+ }
+ // Success; reset backoff.
+ ac.mu.Lock()
+ ac.backoffIdx = 0
+ ac.mu.Unlock()
}
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at the
-// first successful one. It returns the transport, the address and a Event in
-// the successful case. The Event fires when the returned transport disconnects.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+// tryAllAddrs tries to create a connection to the addresses, and stops at
+// the first successful one. It returns an error if no address was successfully
+// connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
var firstConnErr error
for _, addr := range addrs {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
- return nil, resolver.Address{}, nil, errConnClosing
+ return errConnClosing
}
ac.cc.mu.RLock()
@@ -1190,77 +1251,63 @@
}
ac.mu.Unlock()
- channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+ channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
- newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
+ err := ac.createTransport(addr, copts, connectDeadline)
if err == nil {
- return newTr, addr, reconnect, nil
+ return nil
}
if firstConnErr == nil {
firstConnErr = err
}
- ac.cc.blockingpicker.updateConnectionError(err)
+ ac.cc.updateConnectionError(err)
}
// Couldn't connect to any address.
- return nil, resolver.Address{}, nil, firstConnErr
+ return firstConnErr
}
-// createTransport creates a connection to addr. It returns the transport and a
-// Event in the successful case. The Event fires when the returned transport
-// disconnects.
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
- prefaceReceived := make(chan struct{})
- onCloseCalled := make(chan struct{})
- reconnect := grpcsync.NewEvent()
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+ // TODO: Delete prefaceReceived and move the logic to wait for it into the
+ // transport.
+ prefaceReceived := grpcsync.NewEvent()
+ connClosed := grpcsync.NewEvent()
- authority := ac.cc.authority
- // addr.ServerName takes precedent over ClientConn authority, if present.
- if addr.ServerName != "" {
- authority = addr.ServerName
- }
-
- target := transport.TargetInfo{
- Addr: addr.Addr,
- Metadata: addr.Metadata,
- Authority: authority,
- }
-
- once := sync.Once{}
- onGoAway := func(r transport.GoAwayReason) {
- ac.mu.Lock()
- ac.adjustParams(r)
- once.Do(func() {
- if ac.state == connectivity.Ready {
- // Prevent this SubConn from being used for new RPCs by setting its
- // state to Connecting.
- //
- // TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting, nil)
- }
- })
- ac.mu.Unlock()
- reconnect.Fire()
- }
+ addr.ServerName = ac.cc.getServerName(addr)
+ hctx, hcancel := context.WithCancel(ac.ctx)
+ hcStarted := false // protected by ac.mu
onClose := func() {
ac.mu.Lock()
- once.Do(func() {
- if ac.state == connectivity.Ready {
- // Prevent this SubConn from being used for new RPCs by setting its
- // state to Connecting.
- //
- // TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting, nil)
- }
- })
- ac.mu.Unlock()
- close(onCloseCalled)
- reconnect.Fire()
+ defer ac.mu.Unlock()
+ defer connClosed.Fire()
+ if !hcStarted || hctx.Err() != nil {
+ // We didn't start the health check or set the state to READY, so
+ // no need to do anything else here.
+ //
+ // OR, we have already cancelled the health check context, meaning
+ // we have already called onClose once for this transport. In this
+ // case it would be dangerous to clear the transport and update the
+ // state, since there may be a new transport in this addrConn.
+ return
+ }
+ hcancel()
+ ac.transport = nil
+ // Refresh the name resolver
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
+ if ac.state != connectivity.Shutdown {
+ ac.updateConnectivityState(connectivity.Idle, nil)
+ }
}
- onPrefaceReceipt := func() {
- close(prefaceReceived)
+ onGoAway := func(r transport.GoAwayReason) {
+ ac.mu.Lock()
+ ac.adjustParams(r)
+ ac.mu.Unlock()
+ onClose()
}
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
@@ -1269,27 +1316,67 @@
copts.ChannelzParentID = ac.channelzID
}
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
if err != nil {
// newTr is either nil, or closed.
- channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
- return nil, nil, err
+ channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err)
+ return err
}
select {
- case <-time.After(time.Until(connectDeadline)):
+ case <-connectCtx.Done():
// We didn't get the preface in time.
- newTr.Close()
- channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
- return nil, nil, errors.New("timed out waiting for server handshake")
- case <-prefaceReceived:
+ // The error we pass to Close() is immaterial since there are no open
+ // streams at this point, so no trailers with error details will be sent
+ // out. We just need to pass a non-nil error.
+ newTr.Close(transport.ErrConnClosing)
+ if connectCtx.Err() == context.DeadlineExceeded {
+ err := errors.New("failed to receive server preface within timeout")
+ channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err)
+ return err
+ }
+ return nil
+ case <-prefaceReceived.Done():
// We got the preface - huzzah! things are good.
- case <-onCloseCalled:
- // The transport has already closed - noop.
- return nil, nil, errors.New("connection closed")
- // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ if connClosed.HasFired() {
+ // onClose called first; go idle but do nothing else.
+ if ac.state != connectivity.Shutdown {
+ ac.updateConnectivityState(connectivity.Idle, nil)
+ }
+ return nil
+ }
+ if ac.state == connectivity.Shutdown {
+ // This can happen if the subConn was removed while in `Connecting`
+ // state. tearDown() would have set the state to `Shutdown`, but
+ // would not have closed the transport since ac.transport would not have
+ // been set at that point.
+ //
+ // We run this in a goroutine because newTr.Close() calls onClose()
+ // inline, which requires locking ac.mu.
+ //
+ // The error we pass to Close() is immaterial since there are no open
+ // streams at this point, so no trailers with error details will be sent
+ // out. We just need to pass a non-nil error.
+ go newTr.Close(transport.ErrConnClosing)
+ return nil
+ }
+ ac.curAddr = addr
+ ac.transport = newTr
+ hcStarted = true
+ ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+ return nil
+ case <-connClosed.Done():
+ // The transport has already closed. If we received the preface, too,
+ // this is not an error.
+ select {
+ case <-prefaceReceived.Done():
+ return nil
+ default:
+ return errors.New("connection closed before server preface received")
+ }
}
- return newTr, reconnect, nil
}
// startHealthCheck starts the health checking stream (RPC) to watch the health
@@ -1297,7 +1384,7 @@
//
// LB channel health checking is enabled when all requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
-// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
+// 2. internal.HealthCheckFunc is set by importing the grpc/health package
// 3. a service config with non-empty healthCheckConfig field is provided
// 4. the load balancer requests it
//
@@ -1327,7 +1414,7 @@
// The health package is not imported to set health check function.
//
// TODO: add a link to the health check doc in the error message.
- channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.")
+ channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
return
}
@@ -1357,9 +1444,9 @@
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
- channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
+ channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
- channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
+ channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
}
}
}()
@@ -1373,33 +1460,20 @@
ac.mu.Unlock()
}
-// getReadyTransport returns the transport if ac's state is READY.
-// Otherwise it returns nil, false.
-// If ac's state is IDLE, it will trigger ac to connect.
-func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+// getReadyTransport returns the transport if ac's state is READY or nil if not.
+func (ac *addrConn) getReadyTransport() transport.ClientTransport {
ac.mu.Lock()
- if ac.state == connectivity.Ready && ac.transport != nil {
- t := ac.transport
- ac.mu.Unlock()
- return t, true
+ defer ac.mu.Unlock()
+ if ac.state == connectivity.Ready {
+ return ac.transport
}
- var idle bool
- if ac.state == connectivity.Idle {
- idle = true
- }
- ac.mu.Unlock()
- // Trigger idle ac to connect.
- if idle {
- ac.connect()
- }
- return nil, false
+ return nil
}
// tearDown starts to tear down the addrConn.
-// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
-// some edge cases (e.g., the caller opens and closes many addrConn's in a
-// tight loop.
-// tearDown doesn't remove ac from ac.cc.conns.
+//
+// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
+// will leak. In most cases, call cc.removeAddrConn() instead.
func (ac *addrConn) tearDown(err error) {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
@@ -1424,12 +1498,12 @@
ac.mu.Lock()
}
if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
+ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Deleted",
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
@@ -1532,3 +1606,126 @@
}
return resolver.Get(scheme)
}
+
+func (cc *ClientConn) updateConnectionError(err error) {
+ cc.lceMu.Lock()
+ cc.lastConnectionError = err
+ cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+ cc.lceMu.Lock()
+ defer cc.lceMu.Unlock()
+ return cc.lastConnectionError
+}
+
+func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
+ channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
+
+ var rb resolver.Builder
+ parsedTarget, err := parseTarget(cc.target)
+ if err != nil {
+ channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
+ } else {
+ channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+ rb = cc.getResolver(parsedTarget.Scheme)
+ if rb != nil {
+ cc.parsedTarget = parsedTarget
+ return rb, nil
+ }
+ }
+
+ // We are here because the user's dial target did not contain a scheme or
+ // specified an unregistered scheme. We should fallback to the default
+ // scheme, except when a custom dialer is specified in which case, we should
+ // always use passthrough scheme.
+ defScheme := resolver.GetDefaultScheme()
+ channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme)
+ canonicalTarget := defScheme + ":///" + cc.target
+
+ parsedTarget, err = parseTarget(canonicalTarget)
+ if err != nil {
+ channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
+ return nil, err
+ }
+ channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+ rb = cc.getResolver(parsedTarget.Scheme)
+ if rb == nil {
+ return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
+ }
+ cc.parsedTarget = parsedTarget
+ return rb, nil
+}
+
+// parseTarget uses RFC 3986 semantics to parse the given target into a
+// resolver.Target struct containing scheme, authority and endpoint. Query
+// params are stripped from the endpoint.
+func parseTarget(target string) (resolver.Target, error) {
+ u, err := url.Parse(target)
+ if err != nil {
+ return resolver.Target{}, err
+ }
+ // For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+ // value returned from url.Parse() contains a leading "/". Although this is
+ // in accordance with RFC 3986, we do not want to break existing resolver
+ // implementations which expect the endpoint without the leading "/". So, we
+ // end up stripping the leading "/" here. But this will result in an
+ // incorrect parsing for something like "unix:///path/to/socket". Since we
+ // own the "unix" resolver, we can work around this in the unix resolver by using
+ // the `URL` field instead of the `Endpoint` field.
+ endpoint := u.Path
+ if endpoint == "" {
+ endpoint = u.Opaque
+ }
+ endpoint = strings.TrimPrefix(endpoint, "/")
+ return resolver.Target{
+ Scheme: u.Scheme,
+ Authority: u.Host,
+ Endpoint: endpoint,
+ URL: *u,
+ }, nil
+}
+
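
A small standalone sketch of the endpoint extraction performed by parseTarget above, mirroring its net/url handling; the targets are placeholders and the program is illustrative only:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func main() {
        // Mirrors the Path/Opaque handling and leading-"/" stripping done by
        // parseTarget; "localhost:50051" shows why a scheme-less target falls
        // back to the default ("dns") scheme in parseTargetAndFindResolver.
        for _, target := range []string{
            "dns:///foo.example.com:443",
            "unix:///tmp/grpc.sock",
            "localhost:50051",
        } {
            u, err := url.Parse(target)
            if err != nil {
                fmt.Println(target, "->", err)
                continue
            }
            endpoint := u.Path
            if endpoint == "" {
                endpoint = u.Opaque
            }
            endpoint = strings.TrimPrefix(endpoint, "/")
            fmt.Printf("%-28s scheme=%q authority=%q endpoint=%q\n", target, u.Scheme, u.Host, endpoint)
        }
    }
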
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
+ // Historically, we had two options for users to specify the serverName or
+ // authority for a channel. One was through the transport credentials
+ // (either in its constructor, or through the OverrideServerName() method).
+ // The other option (for cases where WithInsecure() dial option was used)
+ // was to use the WithAuthority() dial option.
+ //
+ // A few things have changed since:
+ // - `insecure` package with an implementation of the `TransportCredentials`
+ // interface for the insecure case
+ // - WithAuthority() dial option support for secure credentials
+ authorityFromCreds := ""
+ if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+ authorityFromCreds = creds.Info().ServerName
+ }
+ authorityFromDialOption := dopts.authority
+ if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+ return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+ }
+
+ switch {
+ case authorityFromDialOption != "":
+ return authorityFromDialOption, nil
+ case authorityFromCreds != "":
+ return authorityFromCreds, nil
+ case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
+ // TODO: remove when the unix resolver implements optional interface to
+ // return channel authority.
+ return "localhost", nil
+ case strings.HasPrefix(endpoint, ":"):
+ return "localhost" + endpoint, nil
+ default:
+ // TODO: Define an optional interface on the resolver builder to return
+ // the channel authority given the user's dial target. For resolvers
+ // which don't implement this interface, we will use the endpoint from
+ // "scheme://authority/endpoint" as the default authority.
+ return endpoint, nil
+ }
+}
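
The precedence above can be made concrete with a hedged dial sketch; the names are placeholders and creds stands for any previously constructed TransportCredentials:

    // WithAuthority wins over the credentials' ServerName, which in turn wins
    // over the endpoint taken from the dial target ("backend.internal:443" here).
    conn, err := grpc.Dial(
        "dns:///backend.internal:443",
        grpc.WithTransportCredentials(creds),      // creds.Info().ServerName, if non-empty, is the next candidate
        grpc.WithAuthority("backend.example.com"), // highest precedence: ":authority" becomes "backend.example.com"
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
    // With no override at all, "unix:" and "unix-abstract:" targets fall back to
    // "localhost", as handled in the switch above.
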
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 0273883..11b1061 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -33,6 +33,9 @@
OK Code = 0
// Canceled indicates the operation was canceled (typically by the caller).
+ //
+ // The gRPC framework will generate this error code when cancellation
+ // is requested.
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
@@ -40,12 +43,17 @@
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
+ //
+ // The gRPC framework will generate this error code in the above two
+ // mentioned cases.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
+ //
+ // This error code will not be generated by the gRPC framework.
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
@@ -53,14 +61,21 @@
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
+ //
+ // The gRPC framework will generate this error code when the deadline is
+ // exceeded.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
+ //
+ // This error code will not be generated by the gRPC framework.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
+ //
+ // This error code will not be generated by the gRPC framework.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
@@ -69,10 +84,17 @@
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
+ //
+ // This error code will not be generated by the gRPC core framework,
+ // but expect authentication middleware to use it.
PermissionDenied Code = 7
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
+ //
+ // This error code will be generated by the gRPC framework in
+ // out-of-memory and server overload situations, or when a message is
+ // larger than the configured maximum size.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
@@ -94,6 +116,8 @@
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
+ //
+ // This error code will not be generated by the gRPC framework.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
@@ -102,6 +126,8 @@
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
+ //
+ // This error code will not be generated by the gRPC framework.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
@@ -119,15 +145,26 @@
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
+ //
+ // This error code will not be generated by the gRPC framework.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
+ //
+ // This error code will be generated by the gRPC framework. Most
+ // commonly, you will see this error code when a method implementation
+ // is missing on the server. It can also be generated for unknown
+ // compression algorithms or a disagreement as to whether an RPC should
+ // be streaming.
Unimplemented Code = 12
// Internal errors. Means some invariants expected by underlying
// system has been broken. If you see one of these errors,
// something is very broken.
+ //
+ // This error code will be generated by the gRPC framework in several
+ // internal error conditions.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
@@ -137,13 +174,22 @@
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
+ //
+ // This error code will be generated by the gRPC framework during
+ // abrupt shutdown of a server process or network connection.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
+ //
+ // This error code will not be generated by the gRPC framework.
DataLoss Code = 15
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
+ //
+ // The gRPC framework will generate this error code when the
+ // authentication metadata is invalid or a Credentials callback fails,
+ // but also expect authentication middleware to generate it.
Unauthenticated Code = 16
_maxCode = 17
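
The expanded comments above spell out which codes gRPC itself generates, which mainly matters when a client decides how to react to an RPC error. A hedged fragment, assuming a generated stub named client and the standard status and codes packages; the method name is made up:

    resp, err := client.GetDevice(ctx, req) // placeholder call on a generated stub
    if err != nil {
        switch status.Code(err) {
        case codes.Unavailable:
            // Generated by gRPC itself (e.g. abrupt connection loss); often worth retrying.
        case codes.NotFound:
            // Never generated by the framework, so it must come from the server application.
        default:
            return err
        }
    }
    _ = resp
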
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
index 34ec36f..4a89926 100644
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -18,15 +18,14 @@
// Package connectivity defines connectivity semantics.
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
-// All APIs in this package are experimental.
package connectivity
import (
- "context"
-
"google.golang.org/grpc/grpclog"
)
+var logger = grpclog.Component("core")
+
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
@@ -44,8 +43,8 @@
case Shutdown:
return "SHUTDOWN"
default:
- grpclog.Errorf("unknown connectivity state: %d", s)
- return "Invalid-State"
+ logger.Errorf("unknown connectivity state: %d", s)
+ return "INVALID_STATE"
}
}
@@ -62,12 +61,34 @@
Shutdown
)
-// Reporter reports the connectivity states.
-type Reporter interface {
- // CurrentState returns the current state of the reporter.
- CurrentState() State
- // WaitForStateChange blocks until the reporter's state is different from the given state,
- // and returns true.
- // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
- WaitForStateChange(context.Context, State) bool
+// ServingMode indicates the current mode of operation of the server.
+//
+// Only xDS enabled gRPC servers currently report their serving mode.
+type ServingMode int
+
+const (
+ // ServingModeStarting indicates that the server is starting up.
+ ServingModeStarting ServingMode = iota
+ // ServingModeServing indicates that the server contains all required
+ // configuration and is serving RPCs.
+ ServingModeServing
+ // ServingModeNotServing indicates that the server is not accepting new
+ // connections. Existing connections will be closed gracefully, allowing
+ // in-progress RPCs to complete. A server enters this mode when it does not
+ // contain the required configuration to serve RPCs.
+ ServingModeNotServing
+)
+
+func (s ServingMode) String() string {
+ switch s {
+ case ServingModeStarting:
+ return "STARTING"
+ case ServingModeServing:
+ return "SERVING"
+ case ServingModeNotServing:
+ return "NOT_SERVING"
+ default:
+ logger.Errorf("unknown serving mode: %d", s)
+ return "INVALID_MODE"
+ }
}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index e438fda..96ff187 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -29,7 +29,8 @@
"net"
"github.com/golang/protobuf/proto"
- "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/attributes"
+ icredentials "google.golang.org/grpc/internal/credentials"
)
// PerRPCCredentials defines the common interface for the credentials which need to
@@ -57,9 +58,11 @@
type SecurityLevel int
const (
- // NoSecurity indicates a connection is insecure.
+ // InvalidSecurityLevel indicates an invalid security level.
// The zero SecurityLevel value is invalid for backward compatibility.
- NoSecurity SecurityLevel = iota + 1
+ InvalidSecurityLevel SecurityLevel = iota
+ // NoSecurity indicates a connection is insecure.
+ NoSecurity
// IntegrityOnly indicates a connection only provides integrity protection.
IntegrityOnly
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
@@ -89,7 +92,7 @@
}
// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
-func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
+func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
return c
}
@@ -124,15 +127,23 @@
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
- // ClientHandshake does the authentication handshake specified by the corresponding
- // authentication protocol on rawConn for clients. It returns the authenticated
- // connection and the corresponding auth information about the connection.
- // The auth information should embed CommonAuthInfo to return additional information about
- // the credentials. Implementations must use the provided context to implement timely cancellation.
- // gRPC will try to reconnect if the error returned is a temporary error
- // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
- // If the returned error is a wrapper error, implementations should make sure that
+ // ClientHandshake does the authentication handshake specified by the
+ // corresponding authentication protocol on rawConn for clients. It returns
+ // the authenticated connection and the corresponding auth information
+ // about the connection. The auth information should embed CommonAuthInfo
+ // to return additional information about the credentials. Implementations
+ // must use the provided context to implement timely cancellation. gRPC
+ // will try to reconnect if the error returned is a temporary error
+ // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
+ // returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
+ // Additionally, ClientHandshakeInfo data will be available via the context
+ // passed to this call.
+ //
+ // The second argument to this method is the `:authority` header value used
+ // while creating new streams on this connection after authentication
+ // succeeds. Implementations must use this as the server name during the
+ // authentication handshake.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
@@ -147,9 +158,13 @@
Info() ProtocolInfo
// Clone makes a copy of this TransportCredentials.
Clone() TransportCredentials
- // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
- // gRPC internals also use it to override the virtual hosting name if it is set.
- // It must be called before dialing. Currently, this is only used by grpclb.
+ // OverrideServerName specifies the value used for the following:
+ // - verifying the hostname on the returned certificates
+ // - as SNI in the client's handshake to support virtual hosting
+ // - as the value for `:authority` header at stream creation time
+ //
+ // Deprecated: use grpc.WithAuthority instead. Will be supported
+ // throughout 1.x.
OverrideServerName(string) error
}
@@ -163,8 +178,18 @@
//
// This API is experimental.
type Bundle interface {
+ // TransportCredentials returns the transport credentials from the Bundle.
+ //
+ // Implementations must return non-nil transport credentials. If transport
+ // security is not needed by the Bundle, implementations may choose to
+ // return insecure.NewCredentials().
TransportCredentials() TransportCredentials
+
+ // PerRPCCredentials returns the per-RPC credentials from the Bundle.
+ //
+ // May be nil if per-RPC credentials are not needed.
PerRPCCredentials() PerRPCCredentials
+
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
// existing Bundle may cause races.
//
@@ -182,15 +207,33 @@
AuthInfo AuthInfo
}
-// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
-type requestInfoKey struct{}
-
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
//
// This API is experimental.
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
- ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
- return
+ ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
+ return ri, ok
+}
+
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+ // Attributes contains the attributes for the address. It could be provided
+ // by the gRPC, resolver, balancer etc.
+ Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+ chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+ return chi
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
@@ -198,17 +241,16 @@
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
-func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
type internalInfo interface {
- GetCommonAuthInfo() *CommonAuthInfo
+ GetCommonAuthInfo() CommonAuthInfo
}
- ri, _ := RequestInfoFromContext(ctx)
- if ri.AuthInfo == nil {
- return errors.New("unable to obtain SecurityLevel from context")
+ if ai == nil {
+ return errors.New("AuthInfo is nil")
}
- if ci, ok := ri.AuthInfo.(internalInfo); ok {
+ if ci, ok := ai.(internalInfo); ok {
// CommonAuthInfo.SecurityLevel has an invalid value.
- if ci.GetCommonAuthInfo().SecurityLevel == 0 {
+ if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
return nil
}
if ci.GetCommonAuthInfo().SecurityLevel < level {
@@ -219,12 +261,6 @@
return nil
}
-func init() {
- internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
- return context.WithValue(ctx, requestInfoKey{}, ri)
- }
-}
-
// ChannelzSecurityInfo defines the interface that security protocols should implement
// in order to provide security info to channelz.
//
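
CheckSecurityLevel now takes an AuthInfo instead of a context. A hedged fragment showing how a server-side handler might obtain that AuthInfo through the peer package; the surrounding handler and its imports are assumed, not part of the vendored code:

    // Inside a hypothetical handler that receives a context.Context ctx:
    p, ok := peer.FromContext(ctx)
    if !ok || p.AuthInfo == nil {
        return status.Error(codes.Unauthenticated, "no transport security information")
    }
    if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
        return status.Error(codes.PermissionDenied, err.Error())
    }
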
diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go
deleted file mode 100644
index ccbf35b..0000000
--- a/vendor/google.golang.org/grpc/credentials/go12.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build go1.12
-
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import "crypto/tls"
-
-// This init function adds cipher suite constants only defined in Go 1.12.
-func init() {
- cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
- cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
- cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
-}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 0000000..4fbed12
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+ "context"
+ "net"
+
+ "google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns credentials which disable transport security.
+//
+// Note that using these credentials with per-RPC credentials which require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+ return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+ return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+ return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+ return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+ credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+ return "insecure"
+}
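
This new package is the intended replacement for the WithInsecure dial option that is deprecated further down in this diff. A minimal hedged sketch with a placeholder target:

    conn, err := grpc.Dial("localhost:50051",
        grpc.WithTransportCredentials(insecure.NewCredentials()), // replaces grpc.WithInsecure()
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
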
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 86e956b..784822d 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -25,8 +25,9 @@
"fmt"
"io/ioutil"
"net"
+ "net/url"
- "google.golang.org/grpc/credentials/internal"
+ credinternal "google.golang.org/grpc/internal/credentials"
)
// TLSInfo contains the auth information for a TLS authenticated connection.
@@ -34,6 +35,8 @@
type TLSInfo struct {
State tls.ConnectionState
CommonAuthInfo
+ // This API is experimental.
+ SPIFFEID *url.URL
}
// AuthType returns the type of TLSInfo as a string.
@@ -69,7 +72,7 @@
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
- cfg := cloneTLSConfig(c.config)
+ cfg := credinternal.CloneTLSConfig(c.config)
if cfg.ServerName == "" {
serverName, _, err := net.SplitHostPort(authority)
if err != nil {
@@ -94,7 +97,17 @@
conn.Close()
return nil, nil, ctx.Err()
}
- return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+ tlsInfo := TLSInfo{
+ State: conn.ConnectionState(),
+ CommonAuthInfo: CommonAuthInfo{
+ SecurityLevel: PrivacyAndIntegrity,
+ },
+ }
+ id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+ if id != nil {
+ tlsInfo.SPIFFEID = id
+ }
+ return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -103,7 +116,17 @@
conn.Close()
return nil, nil, err
}
- return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+ tlsInfo := TLSInfo{
+ State: conn.ConnectionState(),
+ CommonAuthInfo: CommonAuthInfo{
+ SecurityLevel: PrivacyAndIntegrity,
+ },
+ }
+ id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+ if id != nil {
+ tlsInfo.SPIFFEID = id
+ }
+ return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
@@ -115,23 +138,10 @@
return nil
}
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
- for _, p := range ps {
- if p == alpnProtoStrH2 {
- return ps
- }
- }
- ret := make([]string, 0, len(ps)+1)
- ret = append(ret, ps...)
- return append(ret, alpnProtoStrH2)
-}
-
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{cloneTLSConfig(c)}
- tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
+ tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
+ tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
return tc
}
@@ -185,7 +195,10 @@
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
@@ -217,19 +230,7 @@
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
-
- return cfg.Clone()
+ tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
+ tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
+ tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 35bde10..c4bf09f 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -27,10 +27,9 @@
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal"
internalbackoff "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -46,18 +45,17 @@
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
- cp Compressor
- dc Decompressor
- bs internalbackoff.Strategy
- block bool
- insecure bool
- timeout time.Duration
- scChan <-chan ServiceConfig
- authority string
- copts transport.ConnectOptions
- callOptions []CallOption
- // This is used by v1 balancer dial option WithBalancer to support v1
- // balancer, and also by WithBalancerName dial option.
+ cp Compressor
+ dc Decompressor
+ bs internalbackoff.Strategy
+ block bool
+ returnLastError bool
+ timeout time.Duration
+ scChan <-chan ServiceConfig
+ authority string
+ copts transport.ConnectOptions
+ callOptions []CallOption
+ // This is used by WithBalancerName dial option.
balancerBuilder balancer.Builder
channelzParentID int64
disableServiceConfig bool
@@ -67,12 +65,7 @@
minConnectTimeout func() time.Duration
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
- // This is used by ccResolverWrapper to backoff between successive calls to
- // resolver.ResolveNow(). The user will have no need to configure this, but
- // we need to be able to configure this in tests.
- resolveNowBackoff func(int) time.Duration
- resolvers []resolver.Builder
- withProxy bool
+ resolvers []resolver.Builder
}
// DialOption configures how we set up the connection.
@@ -83,7 +76,10 @@
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type EmptyDialOption struct{}
func (EmptyDialOption) apply(*dialOptions) {}
@@ -199,19 +195,6 @@
})
}
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName. Will be removed in a future 1.x release.
-func WithBalancer(b Balancer) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = &balancerWrapperBuilder{
- b: b,
- }
- })
-}
-
// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
@@ -244,15 +227,14 @@
})
}
-// WithConnectParams configures the dialer to use the provided ConnectParams.
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
//
// The backoff configuration specified as part of the ConnectParams overrides
// all defaults specified in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
-//
-// This API is EXPERIMENTAL.
func WithConnectParams(p ConnectParams) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = internalbackoff.Exponential{Config: p.Backoff}
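
As a hedged sketch of the reworded WithConnectParams behaviour above (assuming the caller keeps the default backoff and only shortens the minimum connect timeout; values are illustrative):

    package example

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/backoff"
    )

    // connectParamsOption starts from backoff.DefaultConfig, as the comment
    // above suggests, and overrides only the minimum connect timeout.
    func connectParamsOption() grpc.DialOption {
        return grpc.WithConnectParams(grpc.ConnectParams{
            Backoff:           backoff.DefaultConfig,
            MinConnectTimeout: 10 * time.Second,
        })
    }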
@@ -290,7 +272,7 @@
})
}
-// WithBlock returns a DialOption which makes caller of Dial blocks until the
+// WithBlock returns a DialOption which makes callers of Dial block until the
// underlying connection is up. Without this, Dial returns immediately and
// connecting the server happens in background.
func WithBlock() DialOption {
@@ -299,22 +281,47 @@
})
}
+// WithReturnConnectionError returns a DialOption which makes the client connection
+// return a string containing both the last connection error that occurred and
+// the context.DeadlineExceeded error.
+// Implies WithBlock()
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithReturnConnectionError() DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.block = true
+ o.returnLastError = true
+ })
+}
+
// WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause grpc.Dial() to fail.
+//
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
+// Will be supported throughout 1.x.
func WithInsecure() DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.insecure = true
+ o.copts.TransportCredentials = insecure.NewCredentials()
})
}
// WithNoProxy returns a DialOption which disables the use of proxies for this
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithNoProxy() DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.withProxy = false
+ o.copts.UseProxy = false
})
}
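
Taken together, this hunk deprecates WithInsecure in favour of the credentials/insecure package and adds WithReturnConnectionError. A hedged sketch of the suggested migration (the target address is a placeholder):

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Explicit insecure credentials replace the deprecated grpc.WithInsecure().
        // WithReturnConnectionError implies WithBlock, so a failed Dial reports
        // the last connection error alongside context.DeadlineExceeded.
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithReturnConnectionError(),
        )
        if err != nil {
            log.Fatalf("dial failed: %v", err)
        }
        defer conn.Close()
    }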
@@ -339,7 +346,10 @@
// the ClientConn.WithCreds. This should not be used together with
// WithTransportCredentials.
//
-// This API is experimental.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithCredentialsBundle(b credentials.Bundle) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.CredsBundle = b
@@ -404,7 +414,10 @@
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.FailOnNonTempDialError = f
@@ -423,7 +436,7 @@
// for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
- grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+ logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
kp.Time = internal.KeepaliveMinPingTime
}
return newFuncDialOption(func(o *dialOptions) {
@@ -459,7 +472,7 @@
}
// WithChainStreamInterceptor returns a DialOption that specifies the chained
-// interceptor for unary RPCs. The first interceptor will be the outer most,
+// interceptor for streaming RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
@@ -470,8 +483,7 @@
}
// WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header. This value only works with WithInsecure and has no
-// effect if TransportCredentials are present.
+// :authority pseudo-header and as the server name in authentication handshake.
func WithAuthority(a string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.authority = a
@@ -482,7 +494,10 @@
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithChannelzParentID(id int64) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.channelzParentID = id
@@ -504,11 +519,16 @@
// WithDefaultServiceConfig returns a DialOption that configures the default
// service config, which will be used in cases where:
//
-// 1. WithDisableServiceConfig is also used.
-// 2. Resolver does not return a service config or if the resolver returns an
-// invalid service config.
+// 1. WithDisableServiceConfig is also used, or
//
-// This API is EXPERIMENTAL.
+// 2. The name resolver does not provide a service config or provides an
+// invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
func WithDefaultServiceConfig(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.defaultServiceConfigRawJSON = &s
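
A hedged sketch of the JSON default service config described above; the service name and the round_robin policy are illustrative values only:

    package example

    import "google.golang.org/grpc"

    // defaultServiceConfig is used only when the resolver supplies no (or an
    // invalid) service config, or when WithDisableServiceConfig is set.
    const defaultServiceConfig = `{
      "loadBalancingConfig": [{"round_robin": {}}],
      "methodConfig": [{
        "name": [{"service": "example.Echo"}],
        "waitForReady": true
      }]
    }`

    func serviceConfigOption() grpc.DialOption {
        return grpc.WithDefaultServiceConfig(defaultServiceConfig)
    }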
@@ -520,11 +540,8 @@
// will happen automatically if no data is written to the wire or if the RPC is
// unprocessed by the remote server.
//
-// Retry support is currently disabled by default, but will be enabled by
-// default in the future. Until then, it may be enabled by setting the
-// environment variable "GRPC_GO_RETRY" to "on".
-//
-// This API is EXPERIMENTAL.
+// Retry support is currently enabled by default, but may be disabled by
+// setting the environment variable "GRPC_GO_RETRY" to "off".
func WithDisableRetry() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableRetry = true
@@ -542,7 +559,10 @@
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithDisableHealthCheck() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableHealthCheck = true
@@ -561,14 +581,12 @@
func defaultDialOptions() dialOptions {
return dialOptions{
- disableRetry: !envconfig.Retry,
healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{
WriteBufferSize: defaultWriteBufSize,
ReadBufferSize: defaultReadBufSize,
+ UseProxy: true,
},
- resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
- withProxy: true,
}
}
@@ -583,22 +601,15 @@
})
}
-// withResolveNowBackoff specifies the function that clientconn uses to backoff
-// between successive calls to resolver.ResolveNow().
-//
-// For testing purpose only.
-func withResolveNowBackoff(f func(int) time.Duration) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.resolveNowBackoff = f
- })
-}
-
// WithResolvers allows a list of resolver implementations to be registered
// locally with the ClientConn without needing to be globally registered via
// resolver.Register. They will be matched against the scheme used for the
// current Dial only, and will take precedence over the global registry.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithResolvers(rs ...resolver.Builder) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.resolvers = append(o.resolvers, rs...)
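
To round off the dialoptions changes, a hedged sketch of WithResolvers with a toy resolver.Builder; staticBuilder and its "static" scheme are hypothetical and purely for illustration:

    package example

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/resolver"
    )

    // staticBuilder always reports a single fixed address.
    type staticBuilder struct{ addr string }

    func (b *staticBuilder) Scheme() string { return "static" }

    func (b *staticBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
        cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: b.addr}}})
        return nopResolver{}, nil
    }

    type nopResolver struct{}

    func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
    func (nopResolver) Close()                                {}

    func dial() (*grpc.ClientConn, error) {
        // The builder is matched for this Dial only; it is never registered
        // in the global resolver registry.
        return grpc.Dial("static:///ignored",
            grpc.WithResolvers(&staticBuilder{addr: "localhost:50051"}),
            grpc.WithTransportCredentials(insecure.NewCredentials()),
        )
    }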
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 187adbb..0022859 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,6 +16,8 @@
*
*/
+//go:generate ./regenerate.sh
+
/*
Package grpc implements an RPC system called gRPC.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 195e844..6d84f74 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -19,7 +19,10 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
-// This package is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
package encoding
import (
@@ -46,10 +49,15 @@
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
- // EXPERIMENTAL: if a Compressor implements
+ // If a Compressor implements
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
// to determine the size of the buffer allocated for the result of decompression.
// Return -1 to indicate unknown size.
+ //
+ // Experimental
+ //
+ // Notice: This API is EXPERIMENTAL and may be changed or removed in a
+ // later release.
}
var registeredCompressor = make(map[string]Compressor)
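
To illustrate the Compressor interface documented above, a hedged sketch of a gzip-backed implementation registered at init time (gRPC already ships an equivalent in encoding/gzip; this is only illustrative):

    package gzipexample

    import (
        "compress/gzip"
        "io"

        "google.golang.org/grpc/encoding"
    )

    type compressor struct{}

    func (compressor) Name() string { return "gzip" }

    func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
        return gzip.NewWriter(w), nil
    }

    func (compressor) Decompress(r io.Reader) (io.Reader, error) {
        return gzip.NewReader(r)
    }

    func init() {
        // After registration the compressor can be selected per call with
        // grpc.UseCompressor("gzip").
        encoding.RegisterCompressor(compressor{})
    }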
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66b97a6..3009b35 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -21,8 +21,7 @@
package proto
import (
- "math"
- "sync"
+ "fmt"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/encoding"
@@ -38,73 +37,22 @@
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
type codec struct{}
-type cachedProtoBuffer struct {
- lastMarshaledSize uint32
- proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
- if val > math.MaxInt32 {
- return uint32(math.MaxInt32)
- }
- return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
- protoMsg := v.(proto.Message)
- newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
- cb.SetBuf(newSlice)
- cb.Reset()
- if err := cb.Marshal(protoMsg); err != nil {
- return nil, err
- }
- out := cb.Bytes()
- cb.lastMarshaledSize = capToMaxInt32(len(out))
- return out, nil
-}
-
func (codec) Marshal(v interface{}) ([]byte, error) {
- if pm, ok := v.(proto.Marshaler); ok {
- // object can marshal itself, no need for buffer
- return pm.Marshal()
+ vv, ok := v.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
}
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- out, err := marshal(v, cb)
-
- // put back buffer and lose the ref to the slice
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return out, err
+ return proto.Marshal(vv)
}
func (codec) Unmarshal(data []byte, v interface{}) error {
- protoMsg := v.(proto.Message)
- protoMsg.Reset()
-
- if pu, ok := protoMsg.(proto.Unmarshaler); ok {
- // object can unmarshal itself, no need for buffer
- return pu.Unmarshal(data)
+ vv, ok := v.(proto.Message)
+ if !ok {
+ return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- cb.SetBuf(data)
- err := cb.Unmarshal(protoMsg)
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return err
+ return proto.Unmarshal(data, vv)
}
func (codec) Name() string {
return Name
}
-
-var protoBufferPool = &sync.Pool{
- New: func() interface{} {
- return &cachedProtoBuffer{
- Buffer: proto.Buffer{},
- lastMarshaledSize: 16,
- }
- },
-}
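
The rewritten codec above delegates to proto.Marshal/proto.Unmarshal and returns an error, rather than panicking, when a non-proto.Message value is passed. A small hedged sketch of that behaviour:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/encoding"
        _ "google.golang.org/grpc/encoding/proto" // registers the "proto" codec
    )

    func main() {
        codec := encoding.GetCodec("proto")
        // A string is not a proto.Message, so Marshal now returns an error
        // instead of panicking on a failed type assertion.
        if _, err := codec.Marshal("not a protobuf message"); err != nil {
            fmt.Println("marshal rejected:", err)
        }
    }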
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index ecef1ab..fcffdce 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -1,16 +1,19 @@
module google.golang.org/grpc
-go 1.11
+go 1.14
require (
- github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
- github.com/envoyproxy/go-control-plane v0.9.4
+ github.com/cespare/xxhash/v2 v2.1.1
+ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4
+ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1
+ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
- github.com/golang/mock v1.1.1
- github.com/golang/protobuf v1.3.3
- github.com/google/go-cmp v0.2.0
- golang.org/x/net v0.0.0-20190311183353-d8887717615a
- golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
- golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
- google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
+ github.com/golang/protobuf v1.4.3
+ github.com/google/go-cmp v0.5.0
+ github.com/google/uuid v1.1.2
+ golang.org/x/net v0.0.0-20200822124328-c89045814202
+ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
+ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
+ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
+ google.golang.org/protobuf v1.25.0
)
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
index 0bf9f07..8b542e0 100644
--- a/vendor/google.golang.org/grpc/go.sum
+++ b/vendor/google.golang.org/grpc/go.sum
@@ -1,64 +1,129 @@
-cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 0000000..8358dd6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/internal/grpclog"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+ name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...interface{}) {
+ args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+ grpclog.InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...interface{}) {
+ args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+ grpclog.WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
+ args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+ grpclog.ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...interface{}) {
+ args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+ grpclog.FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...interface{}) {
+ c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...interface{}) {
+ c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...interface{}) {
+ c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...interface{}) {
+ c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...interface{}) {
+ c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...interface{}) {
+ c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...interface{}) {
+ c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...interface{}) {
+ c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...interface{}) {
+ c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...interface{}) {
+ c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...interface{}) {
+ c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...interface{}) {
+ c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+ return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a component
+// with the name already exists, nothing will be created and it will be
+// returned. SetLoggerV2 will panic if it is called with a logger created by
+// Component.
+func Component(componentName string) DepthLoggerV2 {
+ if cData, ok := cache[componentName]; ok {
+ return cData
+ }
+ c := &componentData{componentName}
+ cache[componentName] = c
+ return c
+}
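
A hedged sketch of how the new component logger is meant to be used inside the module; the component name is arbitrary:

    package example

    import "google.golang.org/grpc/grpclog"

    // Each package asks for a named component logger once; repeated calls with
    // the same name return the cached instance.
    var logger = grpclog.Component("transport")

    func demo() {
        // Output is prefixed with "[transport]" by the componentData methods.
        logger.Infof("connection established to %s", "backend-1")
        if logger.V(2) {
            logger.Info("details emitted only at verbosity >= 2")
        }
    }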
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index 23612b7..7c1f664 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,11 +19,14 @@
package grpclog
import (
+ "encoding/json"
+ "fmt"
"io"
"io/ioutil"
"log"
"os"
"strconv"
+ "strings"
"google.golang.org/grpc/internal/grpclog"
)
@@ -67,6 +70,9 @@
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
+ if _, ok := l.(*componentData); ok {
+ panic("cannot use component logger as grpclog logger")
+ }
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
@@ -92,8 +98,9 @@
// loggerT is the default logger used by grpclog.
type loggerT struct {
- m []*log.Logger
- v int
+ m []*log.Logger
+ v int
+ jsonFormat bool
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -102,19 +109,32 @@
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
+ return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+ return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
+}
+
+type loggerV2Config struct {
+ verbose int
+ jsonFormat bool
+}
+
+func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
var m []*log.Logger
- m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
+ flag := log.LstdFlags
+ if c.jsonFormat {
+ flag = 0
+ }
+ m = append(m, log.New(infoW, "", flag))
+ m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
- m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
- return &loggerT{m: m, v: v}
+ m = append(m, log.New(ew, "", flag))
+ m = append(m, log.New(ew, "", flag))
+ return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -139,58 +159,79 @@
if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl
}
- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+
+ jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+ return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
+ verbose: v,
+ jsonFormat: jsonFormat,
+ })
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
}
func (g *loggerT) Info(args ...interface{}) {
- g.m[infoLog].Print(args...)
+ g.output(infoLog, fmt.Sprint(args...))
}
func (g *loggerT) Infoln(args ...interface{}) {
- g.m[infoLog].Println(args...)
+ g.output(infoLog, fmt.Sprintln(args...))
}
func (g *loggerT) Infof(format string, args ...interface{}) {
- g.m[infoLog].Printf(format, args...)
+ g.output(infoLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Warning(args ...interface{}) {
- g.m[warningLog].Print(args...)
+ g.output(warningLog, fmt.Sprint(args...))
}
func (g *loggerT) Warningln(args ...interface{}) {
- g.m[warningLog].Println(args...)
+ g.output(warningLog, fmt.Sprintln(args...))
}
func (g *loggerT) Warningf(format string, args ...interface{}) {
- g.m[warningLog].Printf(format, args...)
+ g.output(warningLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Error(args ...interface{}) {
- g.m[errorLog].Print(args...)
+ g.output(errorLog, fmt.Sprint(args...))
}
func (g *loggerT) Errorln(args ...interface{}) {
- g.m[errorLog].Println(args...)
+ g.output(errorLog, fmt.Sprintln(args...))
}
func (g *loggerT) Errorf(format string, args ...interface{}) {
- g.m[errorLog].Printf(format, args...)
+ g.output(errorLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Fatal(args ...interface{}) {
- g.m[fatalLog].Fatal(args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+ g.output(fatalLog, fmt.Sprint(args...))
+ os.Exit(1)
}
func (g *loggerT) Fatalln(args ...interface{}) {
- g.m[fatalLog].Fatalln(args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+ g.output(fatalLog, fmt.Sprintln(args...))
+ os.Exit(1)
}
func (g *loggerT) Fatalf(format string, args ...interface{}) {
- g.m[fatalLog].Fatalf(format, args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+ g.output(fatalLog, fmt.Sprintf(format, args...))
+ os.Exit(1)
}
func (g *loggerT) V(l int) bool {
@@ -201,14 +242,18 @@
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type DepthLoggerV2 interface {
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ LoggerV2
+ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
- // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}
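
The logger rewrite above routes every call through output(), which emits either the classic "SEVERITY: message" prefix or, when GRPC_GO_LOG_FORMATTER=json is set in the environment, a one-line JSON object. Installing a custom logger still goes through SetLoggerV2; a hedged sketch:

    package example

    import (
        "io/ioutil"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func init() {
        // Discard info output, keep warnings and errors on stderr, verbosity 2.
        // SetLoggerV2 must run before any other gRPC call and panics if handed
        // a logger created by grpclog.Component.
        grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(ioutil.Discard, os.Stderr, os.Stderr, 2))
    }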
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
deleted file mode 100644
index 7c7bcad..0000000
--- a/vendor/google.golang.org/grpc/install_gae.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-TMP=$(mktemp -d /tmp/sdk.XXX) \
-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
-&& unzip -q $TMP.zip -d $TMP \
-&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 8b73500..668e0ad 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -25,17 +25,41 @@
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
-// and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
-// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
-// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
+// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on
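
As a hedged companion to the expanded interceptor documentation, a minimal unary client interceptor that times each RPC and is installed via WithUnaryInterceptor (names are illustrative):

    package example

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
    )

    // timingInterceptor logs the duration of each unary RPC. It must call the
    // supplied invoker, otherwise the RPC never runs.
    func timingInterceptor(ctx context.Context, method string, req, reply interface{},
        cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        start := time.Now()
        err := invoker(ctx, method, req, reply, cc, opts...)
        log.Printf("rpc %s took %v (err=%v)", method, time.Since(start), err)
        return err
    }

    func interceptorOption() grpc.DialOption {
        return grpc.WithUnaryInterceptor(timingInterceptor)
    }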
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 8b10516..5cc3aed 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -25,6 +25,7 @@
"os"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/grpcutil"
)
// Logger is the global binary logger. It can be used to get binary logger for
@@ -39,6 +40,8 @@
// It is used to get a methodLogger for each individual method.
var binLogger Logger
+var grpclogLogger = grpclog.Component("binarylog")
+
// SetLogger sets the binary logger.

//
// Only call this at init time.
@@ -146,9 +149,9 @@
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
- s, m, err := parseMethodName(methodName)
+ s, m, err := grpcutil.ParseMethod(methodName)
if err != nil {
- grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+ grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
if ml, ok := l.methods[s+"/"+m]; ok {
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index be30d0e..d8f4e76 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -24,8 +24,6 @@
"regexp"
"strconv"
"strings"
-
- "google.golang.org/grpc/grpclog"
)
// NewLoggerFromConfigString reads the string and build a logger. It can be used
@@ -52,7 +50,7 @@
methods := strings.Split(s, ",")
for _, method := range methods {
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
- grpclog.Warningf("failed to parse binary log config: %v", err)
+ grpclogLogger.Warningf("failed to parse binary log config: %v", err)
return nil
}
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 160f6e8..0cdb418 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -27,7 +27,6 @@
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@@ -66,7 +65,7 @@
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
- sink: defaultSink, // TODO(blog): make it plugable.
+ sink: DefaultSink, // TODO(blog): make it plugable.
}
}
@@ -219,12 +218,12 @@
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
@@ -259,12 +258,12 @@
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
@@ -315,7 +314,7 @@
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
- grpclog.Info("binarylogging: error in trailer is not a status error")
+ grpclogLogger.Info("binarylogging: error in trailer is not a status error")
}
var (
detailsBytes []byte
@@ -325,7 +324,7 @@
if stProto != nil && len(stProto.Details) != 0 {
detailsBytes, err = proto.Marshal(stProto)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
deleted file mode 100644
index 113d40c..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
- rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/binarylog/grpc_binarylog_v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
-popd
-rm -f ./grpc_binarylog_v1/*.pb.go
-cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
-
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index a2e7c34..c2fdd58 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -21,32 +21,23 @@
import (
"bufio"
"encoding/binary"
- "fmt"
"io"
- "io/ioutil"
"sync"
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
)
var (
- defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+ // DefaultSink is the sink where the logs will be written to. It's exported
+ // for the binarylog package to update.
+ DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
- if defaultSink != nil {
- defaultSink.Close()
- }
- defaultSink = s
-}
-
// Sink writes log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
type Sink interface {
// Write will be called to write the log entry into the sink.
//
@@ -67,7 +58,7 @@
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffer is done, Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
return &writerSink{out: w}
}
@@ -78,7 +69,8 @@
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
- grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+ grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+ return err
}
hdr := make([]byte, 4)
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@@ -93,25 +85,28 @@
func (ws *writerSink) Close() error { return nil }
-type bufWriteCloserSink struct {
- mu sync.Mutex
- closer io.Closer
- out *writerSink // out is built on buf.
- buf *bufio.Writer // buf is kept for flush.
+type bufferedSink struct {
+ mu sync.Mutex
+ closer io.Closer
+ out Sink // out is built on buf.
+ buf *bufio.Writer // buf is kept for flush.
+ flusherStarted bool
- writeStartOnce sync.Once
- writeTicker *time.Ticker
+ writeTicker *time.Ticker
+ done chan struct{}
}
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
- // Start the write loop when Write is called.
- fs.writeStartOnce.Do(fs.startFlushGoroutine)
+func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if !fs.flusherStarted {
+ // Start the write loop when Write is called.
+ fs.startFlushGoroutine()
+ fs.flusherStarted = true
+ }
if err := fs.out.Write(e); err != nil {
- fs.mu.Unlock()
return err
}
- fs.mu.Unlock()
return nil
}
@@ -119,44 +114,57 @@
bufFlushDuration = 60 * time.Second
)
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
fs.writeTicker = time.NewTicker(bufFlushDuration)
go func() {
- for range fs.writeTicker.C {
+ for {
+ select {
+ case <-fs.done:
+ return
+ case <-fs.writeTicker.C:
+ }
fs.mu.Lock()
- fs.buf.Flush()
+ if err := fs.buf.Flush(); err != nil {
+ grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+ }
fs.mu.Unlock()
}
}()
}
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
if fs.writeTicker != nil {
fs.writeTicker.Stop()
}
- fs.mu.Lock()
- fs.buf.Flush()
- fs.closer.Close()
- fs.out.Close()
- fs.mu.Unlock()
+ close(fs.done)
+ if err := fs.buf.Flush(); err != nil {
+ grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+ }
+ if err := fs.closer.Close(); err != nil {
+ grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+ }
+ if err := fs.out.Close(); err != nil {
+ grpclogLogger.Warningf("failed to close the Sink: %v", err)
+ }
return nil
}
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
bufW := bufio.NewWriter(o)
- return &bufWriteCloserSink{
+ return &bufferedSink{
closer: o,
out: newWriterSink(bufW),
buf: bufW,
+ done: make(chan struct{}),
}
}
-
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
- tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
- if err != nil {
- return nil, fmt.Errorf("failed to create temp file: %v", err)
- }
- return newBufWriteCloserSink(tempFile), nil
-}
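
NewBufferedSink replaces the removed temp-file helper: the caller now creates the sink and installs it on the exported DefaultSink. A hedged sketch of the mechanics, noting that this is an internal package and only code inside the grpc module can import it; the file path is a placeholder:

    package example

    import (
        "os"

        "google.golang.org/grpc/internal/binarylog"
    )

    // installFileSink mirrors what the exported binarylog package now does:
    // wrap a WriteCloser in a buffered sink (flushed every 60 seconds) and
    // install it as DefaultSink.
    func installFileSink(path string) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        binarylog.DefaultSink = binarylog.NewBufferedSink(f)
        return nil
    }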
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
deleted file mode 100644
index 15dc780..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "errors"
- "strings"
-)
-
-// parseMethodName splits service and method from the input. It expects format
-// "/service/method".
-//
-// TODO: move to internal/grpcutil.
-func parseMethodName(methodName string) (service, method string, _ error) {
- if !strings.HasPrefix(methodName, "/") {
- return "", "", errors.New("invalid method name: should start with /")
- }
- methodName = methodName[1:]
-
- pos := strings.LastIndex(methodName, "/")
- if pos < 0 {
- return "", "", errors.New("invalid method name: suffix /method is missing")
- }
- return methodName[:pos], methodName[pos+1:], nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index e4252e5..cd18075 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -30,7 +30,7 @@
"sync/atomic"
"time"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog"
)
const (
@@ -204,9 +204,9 @@
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
if pid == 0 {
- db.get().addChannel(id, cn, true, pid, ref)
+ db.get().addChannel(id, cn, true, pid)
} else {
- db.get().addChannel(id, cn, false, pid, ref)
+ db.get().addChannel(id, cn, false, pid)
}
return id
}
@@ -216,7 +216,7 @@
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
if pid == 0 {
- grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
+ logger.Error("a SubChannel's parent id cannot be 0")
return 0
}
id := idGen.genID()
@@ -228,7 +228,7 @@
pid: pid,
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
- db.get().addSubChannel(id, sc, pid, ref)
+ db.get().addSubChannel(id, sc, pid)
return id
}
@@ -253,12 +253,12 @@
// this listen socket.
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
- grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
+ logger.Error("a ListenSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addListenSocket(id, ls, pid, ref)
+ db.get().addListenSocket(id, ls, pid)
return id
}
@@ -268,16 +268,16 @@
// this normal socket.
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
- grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
+ logger.Error("a NormalSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addNormalSocket(id, ns, pid, ref)
+ db.get().addNormalSocket(id, ns, pid)
return id
}
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes an entry with unique channelz tracking id to be id from
// channelz database.
func RemoveEntry(id int64) {
db.get().removeEntry(id)
@@ -294,17 +294,15 @@
}
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
+func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
for d := desc; d != nil; d = d.Parent {
switch d.Severity {
- case CtUNKNOWN:
- grpclog.InfoDepth(depth+1, d.Desc)
- case CtINFO:
- grpclog.InfoDepth(depth+1, d.Desc)
+ case CtUnknown, CtInfo:
+ l.InfoDepth(depth+1, d.Desc)
case CtWarning:
- grpclog.WarningDepth(depth+1, d.Desc)
+ l.WarningDepth(depth+1, d.Desc)
case CtError:
- grpclog.ErrorDepth(depth+1, d.Desc)
+ l.ErrorDepth(depth+1, d.Desc)
}
}
if getMaxTraceEntry() == 0 {
@@ -335,7 +333,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
c.mu.Lock()
cn.cm = c
cn.trace.cm = c
@@ -348,7 +346,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
c.mu.Lock()
sc.cm = c
sc.trace.cm = c
@@ -357,7 +355,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
c.mu.Lock()
ls.cm = c
c.listenSockets[id] = ls
@@ -365,7 +363,7 @@
c.mu.Unlock()
}
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
c.mu.Lock()
ns.cm = c
c.normalSockets[id] = ns
@@ -632,7 +630,7 @@
if count == 0 {
end = true
}
- var s []*SocketMetric
+ s := make([]*SocketMetric, 0, len(sks))
for _, ns := range sks {
sm := &SocketMetric{}
sm.SocketData = ns.s.ChannelzMetric()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
index 59c7bed..b0013f9 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -21,80 +21,82 @@
import (
"fmt"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog"
)
-// Info logs through grpclog.Info and adds a trace event if channelz is on.
-func Info(id int64, args ...interface{}) {
+var logger = grpclog.Component("channelz")
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
- Severity: CtINFO,
+ Severity: CtInfo,
})
} else {
- grpclog.InfoDepth(1, args...)
+ l.InfoDepth(1, args...)
}
}
-// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
-func Infof(id int64, format string, args ...interface{}) {
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
- Severity: CtINFO,
+ Severity: CtInfo,
})
} else {
- grpclog.InfoDepth(1, msg)
+ l.InfoDepth(1, msg)
}
}
-// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
-func Warning(id int64, args ...interface{}) {
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
} else {
- grpclog.WarningDepth(1, args...)
+ l.WarningDepth(1, args...)
}
}
-// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
-func Warningf(id int64, format string, args ...interface{}) {
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtWarning,
})
} else {
- grpclog.WarningDepth(1, msg)
+ l.WarningDepth(1, msg)
}
}
-// Error logs through grpclog.Error and adds a trace event if channelz is on.
-func Error(id int64, args ...interface{}) {
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
} else {
- grpclog.ErrorDepth(1, args...)
+ l.ErrorDepth(1, args...)
}
}
-// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
-func Errorf(id int64, format string, args ...interface{}) {
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
- AddTraceEvent(id, 1, &TraceEventDesc{
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtError,
})
} else {
- grpclog.ErrorDepth(1, msg)
+ l.ErrorDepth(1, msg)
}
}
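
A minimal sketch of how a caller inside the grpc module would use the reworked channelz logging helpers above, which now take the component logger explicitly; the example package, function and channel id are assumptions, not part of the patch:

package example // hypothetical caller inside the grpc module

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("transport") // per-component logger, as introduced by the patch

func traceConnect(channelID int64, addr string) {
	// Logs via the supplied DepthLoggerV2 and, when channelz is enabled,
	// also records a CtInfo trace event against channelID.
	channelz.Infof(logger, channelID, "connecting to %s", addr)
}
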
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
index 17c2274..3c595d1 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -26,7 +26,6 @@
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
)
// entry represents a node in the channelz database.
@@ -60,17 +59,17 @@
// the addrConn will create a new transport. And when registering the new transport in
// channelz, its parent addrConn could have already been torn down and deleted
// from channelz tracking, and thus reach the code here.
- grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+ logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
}
func (d *dummyEntry) deleteChild(id int64) {
// It is possible for a normal program to reach here under race condition.
// Refer to the example described in addChild().
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
}
func (d *dummyEntry) triggerDelete() {
- grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+ logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
}
func (*dummyEntry) deleteSelfIfReady() {
@@ -215,7 +214,7 @@
case *channel:
c.nestedChans[id] = v.refName
default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
}
}
@@ -326,7 +325,7 @@
if v, ok := e.(*normalSocket); ok {
sc.sockets[id] = v.refName
} else {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
}
}
@@ -493,11 +492,11 @@
}
func (ls *listenSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
}
func (ls *listenSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+ logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
}
func (ls *listenSocket) triggerDelete() {
@@ -506,7 +505,7 @@
}
func (ls *listenSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+ logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
}
func (ls *listenSocket) getParentID() int64 {
@@ -522,11 +521,11 @@
}
func (ns *normalSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
}
func (ns *normalSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+ logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
}
func (ns *normalSocket) triggerDelete() {
@@ -535,7 +534,7 @@
}
func (ns *normalSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+ logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
}
func (ns *normalSocket) getParentID() int64 {
@@ -594,7 +593,7 @@
case *listenSocket:
s.listenSockets[id] = v.refName
default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+ logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
}
}
@@ -673,10 +672,10 @@
type Severity int
const (
- // CtUNKNOWN indicates unknown severity of a trace event.
- CtUNKNOWN Severity = iota
- // CtINFO indicates info level severity of a trace event.
- CtINFO
+ // CtUnknown indicates unknown severity of a trace event.
+ CtUnknown Severity = iota
+ // CtInfo indicates info level severity of a trace event.
+ CtInfo
// CtWarning indicates warning level severity of a trace event.
CtWarning
// CtError indicates error level severity of a trace event.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
index 692dd61..1b1c4cc 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
/*
*
* Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
index 79edbef..8b06eed 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
@@ -22,8 +23,6 @@
import (
"sync"
-
- "google.golang.org/grpc/grpclog"
)
var once sync.Once
@@ -39,6 +38,6 @@
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {
once.Do(func() {
- grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
+ logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
index fdf409d..8d194e4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -1,5 +1,3 @@
-// +build linux,!appengine
-
/*
*
* Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
index 8864a08..837ddc4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000..32c9b59
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "context"
+)
+
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
+type requestInfoKey struct{}
+
+// NewRequestInfoContext creates a context with ri.
+func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
+ return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
+// RequestInfoFromContext extracts the RequestInfo from ctx.
+func RequestInfoFromContext(ctx context.Context) interface{} {
+ return ctx.Value(requestInfoKey{})
+}
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
+ return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext creates a context with chi.
+func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
+ return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
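
A small sketch of the round trip these context helpers provide; the caller-side type below is a hypothetical stand-in, since the real RequestInfo type lives in the public credentials package and is carried here as interface{}:

package example // hypothetical caller inside the grpc module

import (
	"context"
	"fmt"

	icredentials "google.golang.org/grpc/internal/credentials"
)

// requestInfo stands in for the real credentials.RequestInfo type.
type requestInfo struct{ Method string }

func demo() {
	ctx := icredentials.NewRequestInfoContext(context.Background(), requestInfo{Method: "/svc/Method"})
	ri, _ := icredentials.RequestInfoFromContext(ctx).(requestInfo) // caller type-asserts back
	fmt.Println(ri.Method)
}
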
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 0000000..25ade62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, return nil with warning.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+ if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+ return nil
+ }
+ return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, return nil with warning.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+ if cert == nil || cert.URIs == nil {
+ return nil
+ }
+ var spiffeID *url.URL
+ for _, uri := range cert.URIs {
+ if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+ continue
+ }
+ // From this point, we assume the uri is intended for a SPIFFE ID.
+ if len(uri.String()) > 2048 {
+ logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+ return nil
+ }
+ if len(uri.Host) == 0 || len(uri.Path) == 0 {
+ logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+ return nil
+ }
+ if len(uri.Host) > 255 {
+ logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+ return nil
+ }
+ // A valid SPIFFE certificate can only have exactly one URI SAN field.
+ if len(cert.URIs) > 1 {
+ logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+ return nil
+ }
+ spiffeID = uri
+ }
+ return spiffeID
+}
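
For illustration, a sketch (with made-up values) of feeding a certificate carrying a single spiffe:// URI SAN through SPIFFEIDFromCert:

package example // hypothetical caller inside the grpc module

import (
	"crypto/x509"
	"fmt"
	"net/url"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func demo() {
	u, _ := url.Parse("spiffe://example.org/workload-1")
	cert := &x509.Certificate{URIs: []*url.URL{u}} // exactly one URI SAN, as the parser requires
	if id := icredentials.SPIFFEIDFromCert(cert); id != nil {
		fmt.Println(id) // spiffe://example.org/workload-1
	}
}
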
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
similarity index 94%
rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
rename to vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
index 2f4472b..2919632 100644
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
/*
*
* Copyright 2018 gRPC authors.
@@ -18,8 +16,7 @@
*
*/
-// Package internal contains credentials-internal code.
-package internal
+package credentials
import (
"net"
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 0000000..f792fd2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends h2 to next protos.
+func AppendH2ToNextProtos(ps []string) []string {
+ for _, p := range ps {
+ if p == alpnProtoStrH2 {
+ return ps
+ }
+ }
+ ret := make([]string, 0, len(ps)+1)
+ ret = append(ret, ps...)
+ return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+
+ return cfg.Clone()
+}
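
A brief sketch, assuming a caller inside the grpc module, of how the two helpers above would typically be combined when preparing a TLS config for HTTP/2:

package example // hypothetical caller inside the grpc module

import (
	"crypto/tls"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func configFor(base *tls.Config) *tls.Config {
	cfg := icredentials.CloneTLSConfig(base) // safe shallow copy; handles a nil base
	// Make sure "h2" is advertised via ALPN without duplicating it.
	cfg.NextProtos = icredentials.AppendH2ToNextProtos(cfg.NextProtos)
	return cfg
}
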
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index ae6c897..6f02725 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -26,13 +26,10 @@
const (
prefix = "GRPC_GO_"
- retryStr = prefix + "RETRY"
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
)
var (
- // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
- Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
- TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
+ TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 0000000..9bad03c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+ "os"
+ "strings"
+)
+
+const (
+ // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+ // Do not use this and read from env directly. Its value is read and kept in
+ // variable BootstrapFileName.
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+ // XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+ // content. Do not read it from the env directly; its value is read once
+ // and kept in the variable XDSBootstrapFileContent.

+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+
+ ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
+ clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
+ aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+ rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
+ federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+ rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
+
+ c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
+)
+
+var (
+ // XDSBootstrapFileName holds the name of the file which contains xDS
+ // bootstrap configuration. Users can specify the location of the bootstrap
+ // file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+ // XDSBootstrapFileContent holds the content of the xDS bootstrap
+ // configuration. Users can specify the bootstrap config by setting the
+ // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+ // XDSRingHash indicates whether ring hash support is enabled, which can be
+ // disabled by setting the environment variable
+ // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+ XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
+ // XDSClientSideSecurity is used to control processing of security
+ // configuration on the client-side.
+ //
+ // Note that there is no env var protection for the server-side because we
+ // have a brand new API on the server-side and users explicitly need to use
+ // the new API to get security integration on the server.
+ XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
+ // XDSAggregateAndDNS indicates whether processing of aggregated cluster
+ // and DNS cluster is enabled, which can be enabled by setting the
+ // environment variable
+ // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
+ // "true".
+ XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+
+ // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+ // which can be disabled by setting the environment variable
+ // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+ XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
+
+ // XDSFederation indicates whether federation support is enabled.
+ XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
+
+ // XDSRLS indicates whether processing of Cluster Specifier plugins and
+ // support for the RLS Cluster Specifier is enabled, which can be enabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+ // "true".
+ XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
+
+ // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
+)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
index 8c8e19f..30a3b42 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
@@ -19,6 +19,10 @@
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
+import (
+ "os"
+)
+
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
@@ -30,7 +34,7 @@
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
- Logger.Info(args...)
+ Logger.Infoln(args...)
}
}
@@ -39,7 +43,7 @@
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
- Logger.Warning(args...)
+ Logger.Warningln(args...)
}
}
@@ -48,7 +52,7 @@
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
- Logger.Error(args...)
+ Logger.Errorln(args...)
}
}
@@ -57,8 +61,9 @@
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
- Logger.Fatal(args...)
+ Logger.Fatalln(args...)
}
+ os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
@@ -105,14 +110,17 @@
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type DepthLoggerV2 interface {
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
- // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
index f6e0dc1..82af70e 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -18,10 +18,15 @@
package grpclog
+import (
+ "fmt"
+)
+
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
+ logger DepthLoggerV2
prefix string
}
@@ -30,34 +35,47 @@
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
+ pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+ return
}
- Logger.Infof(format, args...)
+ InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
+ pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+ return
}
- Logger.Warningf(format, args...)
+ WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
+ pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+ return
}
- Logger.Errorf(format, args...)
+ ErrorDepth(1, fmt.Sprintf(format, args...))
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
- if Logger.V(2) {
- pl.Infof(format, args...)
+ if !Logger.V(2) {
+ return
}
+ if pl != nil {
+ // Handle nil, so the tests can pass in a nil logger.
+ format = pl.prefix + format
+ pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+ return
+ }
+ InfoDepth(1, fmt.Sprintf(format, args...))
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(prefix string) *PrefixLogger {
- return &PrefixLogger{prefix: prefix}
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+ return &PrefixLogger{logger: logger, prefix: prefix}
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
index 200b115..740f83c 100644
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -31,26 +31,37 @@
mu sync.Mutex
)
+// Int implements rand.Int on the grpcrand global source.
+func Int() int {
+ mu.Lock()
+ defer mu.Unlock()
+ return r.Int()
+}
+
// Int63n implements rand.Int63n on the grpcrand global source.
func Int63n(n int64) int64 {
mu.Lock()
- res := r.Int63n(n)
- mu.Unlock()
- return res
+ defer mu.Unlock()
+ return r.Int63n(n)
}
// Intn implements rand.Intn on the grpcrand global source.
func Intn(n int) int {
mu.Lock()
- res := r.Intn(n)
- mu.Unlock()
- return res
+ defer mu.Unlock()
+ return r.Intn(n)
}
// Float64 implements rand.Float64 on the grpcrand global source.
func Float64() float64 {
mu.Lock()
- res := r.Float64()
- mu.Unlock()
- return res
+ defer mu.Unlock()
+ return r.Float64()
+}
+
+// Uint64 implements rand.Uint64 on the grpcrand global source.
+func Uint64() uint64 {
+ mu.Lock()
+ defer mu.Unlock()
+ return r.Uint64()
}
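
The package simply serializes access to one shared math/rand source; usage mirrors the stdlib, e.g. (hypothetical caller):

package example // hypothetical caller inside the grpc module

import "google.golang.org/grpc/internal/grpcrand"

func jitterIndex(n int) int {
	// Safe for concurrent use; the package guards its shared source with a mutex.
	return grpcrand.Intn(n)
}
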
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 0000000..b25b0ba
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "strconv"
+ "time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds the result up. Note that this is
+// equivalent to (d+r-1)/r but is less likely to overflow.
+func div(d, r time.Duration) int64 {
+ if d%r > 0 {
+ return int64(d/r + 1)
+ }
+ return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+ // TODO: This is simplistic and not bandwidth efficient. Improve it.
+ if t <= 0 {
+ return "0n"
+ }
+ if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "n"
+ }
+ if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "u"
+ }
+ if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "m"
+ }
+ if d := div(t, time.Second); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "S"
+ }
+ if d := div(t, time.Minute); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "M"
+ }
+ // Note that maxTimeoutValue * time.Hour > MaxInt64.
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
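
A short sketch, assuming a caller inside the grpc module, of turning a context deadline into a grpc-timeout header value with EncodeDuration:

package example // hypothetical caller inside the grpc module

import (
	"context"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

// timeoutHeader returns the value for the "grpc-timeout" header; for example
// EncodeDuration(time.Second) yields "1000000u". Empty string means no deadline.
func timeoutHeader(ctx context.Context) string {
	dl, ok := ctx.Deadline()
	if !ok {
		return ""
	}
	return grpcutil.EncodeDuration(time.Until(dl))
}
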
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
similarity index 72%
rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
index d4346e9..e2f948e 100644
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -1,8 +1,6 @@
-// +build appengine
-
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,13 +16,5 @@
*
*/
-package internal
-
-import (
- "net"
-)
-
-// WrapSyscallConn returns newConn on appengine.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
- return newConn
-}
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 0000000..6f22bd8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+ return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists. The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+ md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+ return
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 0000000..4e74750
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "errors"
+ "strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+//
+func ParseMethod(methodName string) (service, method string, _ error) {
+ if !strings.HasPrefix(methodName, "/") {
+ return "", "", errors.New("invalid method name: should start with /")
+ }
+ methodName = methodName[1:]
+
+ pos := strings.LastIndex(methodName, "/")
+ if pos < 0 {
+ return "", "", errors.New("invalid method name: suffix /method is missing")
+ }
+ return methodName[:pos], methodName[pos+1:], nil
+}
+
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+ if contentType == baseContentType {
+ return "", true
+ }
+ if !strings.HasPrefix(contentType, baseContentType) {
+ return "", false
+ }
+ // guaranteed since != baseContentType and has baseContentType prefix
+ switch contentType[len(baseContentType)] {
+ case '+', ';':
+ // this will return true for "application/grpc+" or "application/grpc;"
+ // which the previous validContentType function tested to be valid, so we
+ // just say that no content-subtype is specified in this case
+ return contentType[len(baseContentType)+1:], true
+ default:
+ return "", false
+ }
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase
+func ContentType(contentSubtype string) string {
+ if contentSubtype == "" {
+ return baseContentType
+ }
+ return baseContentType + "+" + contentSubtype
+}
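
Illustrative calls (inputs assumed) showing what ParseMethod, ContentSubtype and ContentType return for typical values:

package example // hypothetical caller inside the grpc module

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func demo() {
	svc, m, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(svc, m, err) // helloworld.Greeter SayHello <nil>

	sub, ok := grpcutil.ContentSubtype("application/grpc+proto")
	fmt.Println(sub, ok) // proto true

	fmt.Println(grpcutil.ContentType("json")) // application/grpc+json
}
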
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
rename to vendor/google.golang.org/grpc/internal/grpcutil/regex.go
index 8783a8c..7a092b2 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -1,8 +1,6 @@
-// +build go1.13
-
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,16 +16,16 @@
*
*/
-package dns
+package grpcutil
-import "net"
+import "regexp"
-func init() {
- filterError = func(err error) error {
- if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
- // The name does not exist; not an error.
- return nil
- }
- return err
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+ if len(text) == 0 {
+ return re.MatchString(text)
}
+ re.Longest()
+ rem := re.FindString(text)
+ return len(rem) == len(text)
}
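
A quick sketch of FullMatchWithRegex semantics on assumed inputs: the regexp is put in leftmost-longest mode and the match must cover the whole text:

package example // hypothetical caller inside the grpc module

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func demo() {
	re := regexp.MustCompile("a+b?")
	fmt.Println(grpcutil.FullMatchWithRegex(re, "aaab")) // true: the match covers the whole text
	fmt.Println(grpcutil.FullMatchWithRegex(re, "aabX")) // false: only a prefix matches
}
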
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go
deleted file mode 100644
index 80b33cd..0000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcutil provides a bunch of utility functions to be used across the
-// gRPC codebase.
-package grpcutil
-
-import (
- "strings"
-
- "google.golang.org/grpc/resolver"
-)
-
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", "", false) instead.
-func split2(s, sep string) (string, string, bool) {
- spl := strings.SplitN(s, sep, 2)
- if len(spl) < 2 {
- return "", "", false
- }
- return spl[0], spl[1], true
-}
-
-// ParseTarget splits target into a resolver.Target struct containing scheme,
-// authority and endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func ParseTarget(target string) (ret resolver.Target) {
- var ok bool
- ret.Scheme, ret.Endpoint, ok = split2(target, "://")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- return ret
-}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c6fbe8b..1b596bf 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -25,6 +25,7 @@
"time"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/serviceconfig"
)
var (
@@ -37,12 +38,32 @@
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
- // NewRequestInfoContext creates a new context based on the argument context attaching
- // the passed in RequestInfo to the new context.
- NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
// ParseServiceConfigForTesting is for creating a fake
// ClientConn for resolver testing only
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+ // EqualServiceConfigForTesting is for testing service config generation and
+ // parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+ // This function compares the config without rawJSON stripped, in case
+ // there is a difference in white space.
+ EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+ // GetCertificateProviderBuilder returns the registered builder for the
+ // given name. This is set by package certprovider for use from xDS
+ // bootstrap code while parsing certificate provider configs in the
+ // bootstrap file.
+ GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
+ // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+ // stored in the passed in attributes. This is set by
+ // credentials/xds/xds.go.
+ GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
+ // GetServerCredentials returns the transport credentials configured on a
+ // gRPC server. An xDS-enabled server needs to know what type of credentials
+ // is configured on the underlying gRPC server. This is set by server.go.
+ GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
+ // DrainServerTransports initiates a graceful close of existing connections
+ // on a gRPC server accepted on the provided listener address. An
+ // xDS-enabled server invokes this method on a grpc.Server when a particular
+ // listener moves to "not-serving" mode.
+ DrainServerTransports interface{} // func(*grpc.Server, string)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 0000000..b8733db
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o interface{}) bool {
+ om, ok := o.(mdValue)
+ if !ok {
+ return false
+ }
+ if len(m) != len(om) {
+ return false
+ }
+ for k, v := range m {
+ ov := om[k]
+ if len(ov) != len(v) {
+ return false
+ }
+ for i, ve := range v {
+ if ov[i] != ve {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+ attrs := addr.Attributes
+ if attrs == nil {
+ return nil
+ }
+ md, _ := attrs.Value(mdKey).(mdValue)
+ return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+ return addr
+}
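
A minimal sketch (hypothetical address and keys) of attaching and reading per-address metadata with Set and Get:

package example // hypothetical caller inside the grpc module

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func demo() {
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	addr = imetadata.Set(addr, metadata.Pairs("cluster", "c1"))
	fmt.Println(imetadata.Get(addr)) // map[cluster:[c1]]
}
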
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 0000000..c7a18a9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+ "context"
+ "sync"
+
+ "google.golang.org/grpc/internal/serviceconfig"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+ // Selects the configuration for the RPC, or terminates it using the error.
+ // This error will be converted by the gRPC library to a status error with
+ // code UNKNOWN if it is not returned as a status error.
+ SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+ // Context is the user's context for the RPC and contains headers and
+ // application timeout. It is passed for interception purposes and for
+ // efficiency reasons. SelectConfig should not be blocking.
+ Context context.Context
+ Method string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+ // The context to use for the remainder of the RPC; can pass info to LB
+ // policy or affect timeout or metadata.
+ Context context.Context
+ MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+ OnCommitted func() // Called when the RPC has been committed (retries no longer possible)
+ Interceptor ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but defined here for circular
+// dependency reasons.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server, if there is any.
+ // It must only be called after stream.CloseAndRecv has returned, or
+ // stream.Recv has returned a non-nil error (including io.EOF).
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when non-nil error is met. It is also not safe to call CloseSend
+ // concurrently with SendMsg.
+ CloseSend() error
+ // Context returns the context for this stream.
+ //
+ // It should not be called until after Header or RecvMsg has returned. Once
+ // called, subsequent client-side retries are disabled.
+ Context() context.Context
+ // SendMsg is generally called by generated code. On error, SendMsg aborts
+ // the stream. If the error was generated by the client, the status is
+ // returned directly; otherwise, io.EOF is returned and the status of
+ // the stream may be discovered using RecvMsg.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the server. An
+ // untimely stream closure may result in lost messages. To ensure delivery,
+ // users should ensure the RPC completed successfully using RecvMsg.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines. It is also
+ // not safe to call CloseSend concurrently with SendMsg.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the stream completes successfully. On
+ // any other error, the stream is aborted and the error contains the RPC
+ // status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+ // NewStream produces a ClientStream for an RPC which may optionally use
+ // the provided function to produce a stream for delegation. Note:
+ // RPCInfo.Context should not be used (will be nil).
+ //
+ // done is invoked when the RPC is finished using its connection, or could
+ // not be assigned a connection. RPC operations may still occur on
+ // ClientStream after done is called, since the interceptor is invoked by
+ // application-layer operations. done must never be nil when called.
+ NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+ // AllowRPC checks if an incoming RPC is allowed to proceed based on
+ // information about connection RPC was received on, and HTTP Headers. This
+ // information will be piped into context.
+ AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+ state.Attributes = state.Attributes.WithValue(csKey, cs)
+ return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+ cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+ return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+ mu sync.RWMutex
+ cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+ scs.mu.Lock()
+ defer scs.mu.Unlock()
+ scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+ scs.mu.RLock()
+ defer scs.mu.RUnlock()
+ return scs.cs.SelectConfig(r)
+}
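
A rough sketch of wiring a trivial ConfigSelector through SafeConfigSelector; the selector type and package below are assumptions, not part of the patch:

package example // hypothetical caller inside the grpc module

import (
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/serviceconfig"
)

// methodConfigSelector applies the same MethodConfig to every RPC and keeps
// the RPC's original context.
type methodConfigSelector struct {
	mc serviceconfig.MethodConfig
}

func (s methodConfigSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{Context: ri.Context, MethodConfig: s.mc}, nil
}

func demo() {
	var safe iresolver.SafeConfigSelector
	// Blocks until any in-flight SelectConfig calls on the previous selector finish.
	safe.UpdateConfigSelector(methodConfigSelector{})
	// The RPC path can now call safe.SelectConfig concurrently.
}
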
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index c368db6..75301c5 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -32,7 +32,9 @@
"sync"
"time"
+ grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/resolver"
@@ -43,6 +45,15 @@
// addresses from SRV records. Must not be changed after init time.
var EnableSRVLookups = false
+var logger = grpclog.Component("dns")
+
+// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
+// single variable for testing the resolver?
+var (
+ newTimer = time.NewTimer
+ newTimerDNSResRate = time.NewTimer
+)
+
func init() {
resolver.Register(NewBuilder())
}
@@ -140,7 +151,6 @@
d.wg.Add(1)
go d.watcher()
- d.ResolveNow(resolver.ResolveNowOptions{})
return d, nil
}
@@ -198,28 +208,38 @@
func (d *dnsResolver) watcher() {
defer d.wg.Done()
+ backoffIndex := 1
for {
- select {
- case <-d.ctx.Done():
- return
- case <-d.rn:
- }
-
state, err := d.lookup()
if err != nil {
+ // Report error to the underlying grpc.ClientConn.
d.cc.ReportError(err)
} else {
- d.cc.UpdateState(*state)
+ err = d.cc.UpdateState(*state)
}
- // Sleep to prevent excessive re-resolutions. Incoming resolution requests
- // will be queued in d.rn.
- t := time.NewTimer(minDNSResRate)
+ var timer *time.Timer
+ if err == nil {
+ // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
+ // to prevent constantly re-resolving.
+ backoffIndex = 1
+ timer = newTimerDNSResRate(minDNSResRate)
+ select {
+ case <-d.ctx.Done():
+ timer.Stop()
+ return
+ case <-d.rn:
+ }
+ } else {
+ // Poll on an error found in DNS Resolver or an error received from ClientConn.
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+ backoffIndex++
+ }
select {
- case <-t.C:
case <-d.ctx.Done():
- t.Stop()
+ timer.Stop()
return
+ case <-timer.C:
}
}
}
@@ -251,27 +271,22 @@
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
}
addr := ip + ":" + strconv.Itoa(int(s.Port))
- newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
}
}
return newAddrs, nil
}
-var filterError = func(err error) error {
+func handleDNSError(err error, lookupType string) error {
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
// Timeouts and temporary errors should be communicated to gRPC to
// attempt another DNS query (with backoff). Other errors should be
// suppressed (they may represent the absence of a TXT record).
return nil
}
- return err
-}
-
-func handleDNSError(err error, lookupType string) error {
- err = filterError(err)
if err != nil {
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
- grpclog.Infoln(err)
+ logger.Info(err)
}
return err
}
@@ -294,7 +309,7 @@
// TXT record must have "grpc_config=" attribute in order to be used as service config.
if !strings.HasPrefix(res, txtAttribute) {
- grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+ logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
// This is not an error; it is the equivalent of not having a service config.
return nil
}
@@ -303,12 +318,12 @@
}
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
- var newAddrs []resolver.Address
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
if err != nil {
err = handleDNSError(err, "A")
return nil, err
}
+ newAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
ip, ok := formatIP(a)
if !ok {
@@ -326,13 +341,15 @@
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
return nil, hostErr
}
- state := &resolver.State{
- Addresses: append(addrs, srv...),
+
+ state := resolver.State{Addresses: addrs}
+ if len(srv) > 0 {
+ state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
}
if !d.disableServiceConfig {
state.ServiceConfig = d.lookupTXT()
}
- return state, nil
+ return &state, nil
}
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@@ -418,12 +435,12 @@
var rcs []rawChoice
err := json.Unmarshal([]byte(js), &rcs)
if err != nil {
- grpclog.Warningf("dns: error parsing service config json: %v", err)
+ logger.Warningf("dns: error parsing service config json: %v", err)
return ""
}
cliHostname, err := os.Hostname()
if err != nil {
- grpclog.Warningf("dns: error getting client hostname: %v", err)
+ logger.Warningf("dns: error getting client hostname: %v", err)
return ""
}
var sc string
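The reworked watcher above replaces the old fixed sleep with two paths: on a successful resolution it resets the backoff index and waits for the minimum re-resolution interval plus the next ResolveNow signal, while on an error it retries after an exponentially growing delay. The standalone sketch below illustrates that control flow under stated assumptions; the names (lookup, resolveNow, minResRate) and the backoff parameters are hypothetical, not the vendored implementation.

```go
package main

import (
	"context"
	"fmt"
	"math"
	"time"
)

// expBackoff mirrors the idea of an exponential backoff policy: the delay
// grows with each consecutive failure, capped at a maximum.
func expBackoff(retries int) time.Duration {
	const base, max = time.Second, 2 * time.Minute
	d := time.Duration(float64(base) * math.Pow(1.6, float64(retries)))
	if d > max {
		return max
	}
	return d
}

// watch sketches the resolver loop: reset backoff on success and wait for a
// resolve-now signal (but at least minResRate); back off and retry on error.
func watch(ctx context.Context, lookup func() error, resolveNow <-chan struct{}) {
	const minResRate = 30 * time.Second
	backoffIndex := 1
	for {
		err := lookup()
		var timer *time.Timer
		if err == nil {
			backoffIndex = 1
			timer = time.NewTimer(minResRate)
			select {
			case <-ctx.Done():
				timer.Stop()
				return
			case <-resolveNow:
			}
		} else {
			timer = time.NewTimer(expBackoff(backoffIndex))
			backoffIndex++
		}
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	watch(ctx, func() error { fmt.Println("resolving"); return nil }, make(chan struct{}))
}
```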
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 0000000..20852e5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/internal/transport/networktype"
+ "google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+ scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+ if target.Authority != "" {
+ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
+ }
+
+ // gRPC was parsing the dial target manually before PR #4817, and we
+ // switched to using url.Parse() in that PR. To avoid breaking existing
+ // resolver implementations we ended up stripping the leading "/" from the
+ // endpoint. This obviously does not work for the "unix" scheme. Hence we
+ // end up using the parsed URL instead.
+ endpoint := target.URL.Path
+ if endpoint == "" {
+ endpoint = target.URL.Opaque
+ }
+ addr := resolver.Address{Addr: endpoint}
+ if b.scheme == unixAbstractScheme {
+ // prepend "\x00" to address for unix-abstract
+ addr.Addr = "\x00" + addr.Addr
+ }
+ cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+ return &nopResolver{}, nil
+}
+
+func (b *builder) Scheme() string {
+ return b.scheme
+}
+
+type nopResolver struct {
+}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+ resolver.Register(&builder{scheme: unixScheme})
+ resolver.Register(&builder{scheme: unixAbstractScheme})
+}
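The new unix resolver registers the "unix" and "unix-abstract" schemes and maps the target path to a single address (prefixing "\x00" for abstract sockets). A minimal sketch of dialing a unix-socket target with the public API follows; the socket path is made up, and grpc.WithInsecure is used only to keep the example self-contained.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "unix:///abs/path" (or "unix:relative/path") is handled by the unix
	// resolver; "unix-abstract:name" targets the abstract socket namespace.
	conn, err := grpc.Dial("unix:///tmp/example.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	// Use conn with a generated client stub as usual.
}
```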
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000..badbdbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+ Name string
+ Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+ if bc.Config == nil {
+ // If config is nil, return empty config `{}`.
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+ }
+ c, err := json.Marshal(bc.Config)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+// - If the config for the first supported policy is invalid, the whole service
+// config is invalid.
+// - If the list doesn't contain any supported policy, the whole service config
+// is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+ var ir intermediateBalancerConfig
+ err := json.Unmarshal(b, &ir)
+ if err != nil {
+ return err
+ }
+
+ var names []string
+ for i, lbcfg := range ir {
+ if len(lbcfg) != 1 {
+ return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+ }
+
+ var (
+ name string
+ jsonCfg json.RawMessage
+ )
+ // Get the key:value pair from the map. We have already made sure that
+ // the map contains a single entry.
+ for name, jsonCfg = range lbcfg {
+ }
+
+ names = append(names, name)
+ builder := balancer.Get(name)
+ if builder == nil {
+ // If the balancer is not registered, move on to the next config.
+ // This is not an error.
+ continue
+ }
+ bc.Name = name
+
+ parser, ok := builder.(balancer.ConfigParser)
+ if !ok {
+ if string(jsonCfg) != "{}" {
+ logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+ }
+ // Stop here, even though the builder doesn't support parsing config.
+ return nil
+ }
+
+ cfg, err := parser.ParseConfig(jsonCfg)
+ if err != nil {
+ return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+ }
+ bc.Config = cfg
+ return nil
+ }
+ // This is reached when the for loop iterates over all entries, but didn't
+ // return. This means we had a loadBalancingConfig slice but did not
+ // encounter a registered policy. The config is considered invalid in this
+ // case.
+ return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady *bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout *time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized
+ // payload after per-message compression (but before stream compression) in bytes.
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ MaxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoff). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+ //
+ // These fields are required and must be greater than zero.
+ InitialBackoff time.Duration
+ MaxBackoff time.Duration
+ BackoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ //
+ // Status codes are specified as strings, e.g., "UNAVAILABLE".
+ //
+ // This field is required and must be non-empty.
+ // Note: a set is used to store this for easy lookup.
+ RetryableStatusCodes map[codes.Code]bool
+}
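UnmarshalJSON above walks the loadBalancingConfig list in order and stops at the first policy whose name is registered, failing only if that policy's config is invalid or no listed policy is supported. The sketch below mirrors that selection over the same []map[string]json.RawMessage shape with a hypothetical set of registered names; it is illustrative only, not the internal package itself.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pickPolicy mirrors the selection rule: iterate in order, skip unregistered
// names, and return the first registered policy with its raw config.
func pickPolicy(js string, registered map[string]bool) (string, json.RawMessage, error) {
	var entries []map[string]json.RawMessage
	if err := json.Unmarshal([]byte(js), &entries); err != nil {
		return "", nil, err
	}
	var names []string
	for i, e := range entries {
		if len(e) != 1 {
			return "", nil, fmt.Errorf("entry %d does not contain exactly one policy", i)
		}
		for name, cfg := range e {
			names = append(names, name)
			if registered[name] {
				return name, cfg, nil
			}
		}
	}
	return "", nil, fmt.Errorf("no supported policies found in %v", names)
}

func main() {
	js := `[{"magic_lb": {}}, {"round_robin": {}}]`
	name, cfg, err := pickPolicy(js, map[string]bool{"round_robin": true, "pick_first": true})
	if err != nil {
		panic(err)
	}
	fmt.Printf("selected %q with config %s\n", name, cfg) // selected "round_robin" with config {}
}
```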
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 6812606..e5c6513 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -97,7 +97,7 @@
if s.Code() == codes.OK {
return nil
}
- return (*Error)(s.Proto())
+ return &Error{s: s}
}
// WithDetails returns a new status with the provided details messages appended to the status.
@@ -136,26 +136,31 @@
return details
}
-// Error is an alias of a status proto. It implements error and Status,
-// and a nil Error should never be returned by this package.
-type Error spb.Status
+func (s *Status) String() string {
+ return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
-func (se *Error) Error() string {
- p := (*spb.Status)(se)
- return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
+// Error wraps a pointer to a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+ s *Status
+}
+
+func (e *Error) Error() string {
+ return e.s.String()
}
// GRPCStatus returns the Status represented by se.
-func (se *Error) GRPCStatus() *Status {
- return FromProto((*spb.Status)(se))
+func (e *Error) GRPCStatus() *Status {
+ return e.s
}
// Is implements future error.Is functionality.
// An Error is equivalent if the code and message are identical.
-func (se *Error) Is(target error) bool {
+func (e *Error) Is(target error) bool {
tse, ok := target.(*Error)
if !ok {
return false
}
- return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
+ return proto.Equal(e.s.s, tse.s.s)
}
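With Error now wrapping a *Status, two status errors compare equal under errors.Is when their code, message, and details match, via proto.Equal on the underlying protos. A short illustrative example using the public status package:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	errA := status.New(codes.NotFound, "user not found").Err()
	errB := status.New(codes.NotFound, "user not found").Err()
	errC := status.New(codes.NotFound, "other message").Err()

	// The Is method compares the underlying status protos, so identical
	// code/message pairs are treated as equivalent errors.
	fmt.Println(errors.Is(errA, errB)) // true
	fmt.Println(errors.Is(errA, errC)) // false
	fmt.Println(status.Code(errA))     // NotFound
}
```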
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 43281a3..b3a7227 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
/*
*
* Copyright 2018 gRPC authors.
@@ -32,35 +30,35 @@
"google.golang.org/grpc/grpclog"
)
+var logger = grpclog.Component("core")
+
// GetCPUTime returns how much CPU time has passed since the start of this process.
func GetCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
- grpclog.Fatal(err)
+ logger.Fatal(err)
}
return ts.Nano()
}
-// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
-type Rusage syscall.Rusage
+// Rusage is an alias for syscall.Rusage under linux environment.
+type Rusage = syscall.Rusage
// GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
- rusage = new(Rusage)
- syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
- return
+func GetRusage() *Rusage {
+ rusage := new(Rusage)
+ syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+ return rusage
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
- f := (*syscall.Rusage)(first)
- l := (*syscall.Rusage)(latest)
var (
- utimeDiffs = l.Utime.Sec - f.Utime.Sec
- utimeDiffus = l.Utime.Usec - f.Utime.Usec
- stimeDiffs = l.Stime.Sec - f.Stime.Sec
- stimeDiffus = l.Stime.Usec - f.Stime.Usec
+ utimeDiffs = latest.Utime.Sec - first.Utime.Sec
+ utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+ stimeDiffs = latest.Stime.Sec - first.Stime.Sec
+ stimeDiffus = latest.Stime.Usec - first.Stime.Usec
)
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
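CPUTimeDiff above converts the seconds/microseconds pairs from two Rusage snapshots into floating-point user and system CPU seconds. The internal package is not importable, so here is a hedged standalone equivalent using syscall.Getrusage (Linux only) with the same sec + usec*1e-6 arithmetic:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

// cpuTimeDiff returns the user and system CPU time (in seconds) consumed
// between two resource-usage snapshots.
func cpuTimeDiff(first, latest *syscall.Rusage) (user, system float64) {
	user = float64(latest.Utime.Sec-first.Utime.Sec) +
		float64(latest.Utime.Usec-first.Utime.Usec)*1e-6
	system = float64(latest.Stime.Sec-first.Stime.Sec) +
		float64(latest.Stime.Usec-first.Stime.Usec)*1e-6
	return user, system
}

func main() {
	var before, after syscall.Rusage
	syscall.Getrusage(syscall.RUSAGE_SELF, &before)

	// Burn a little CPU so the delta is visible.
	x := 0
	for i := 0; i < 10000000; i++ {
		x += i
	}

	syscall.Getrusage(syscall.RUSAGE_SELF, &after)
	u, s := cpuTimeDiff(&before, &after)
	fmt.Printf("x=%d user=%.6fs system=%.6fs\n", x, u, s)
}
```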
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index d3fd9da..999f52c 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
@@ -18,6 +19,8 @@
*
*/
+// Package syscall provides functionalities that grpc uses to get low-level
+// operating system stats/info.
package syscall
import (
@@ -29,44 +32,45 @@
)
var once sync.Once
+var logger = grpclog.Component("core")
func log() {
once.Do(func() {
- grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+ logger.Info("CPU time info is unavailable on non-linux environments.")
})
}
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
func GetCPUTime() int64 {
log()
return 0
}
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
type Rusage struct{}
-// GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
log()
return nil
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It a no-op function for non-linux or appengine environment.
+// between two Rusage structs. It is a no-op function for non-linux environments.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
log()
return 0, 0
}
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
+// SetTCPUserTimeout is a no-op function under non-linux environments.
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
log()
return nil
}
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
func GetTCPUserTimeout(conn net.Conn) (int, error) {
log()
return -1, nil
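The constraint change at the top of this file drops the appengine tag and adds the //go:build form introduced in Go 1.17, which tooling keeps in sync with the legacy // +build line. For reference, a file restricted to non-Linux platforms carries both lines followed by a blank line before the package clause, roughly like this (package comment wording is illustrative):

```go
//go:build !linux
// +build !linux

// Package syscall provides stubbed-out OS stats on platforms where the
// Linux-specific implementation is unavailable.
package syscall
```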
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index ddee20b..8394d25 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -20,13 +20,17 @@
import (
"bytes"
+ "errors"
"fmt"
"runtime"
+ "strconv"
"sync"
"sync/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/status"
)
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
@@ -128,6 +132,15 @@
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+type earlyAbortStream struct {
+ httpStatus uint32
+ streamID uint32
+ contentSubtype string
+ status *status.Status
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
type dataFrame struct {
streamID uint32
endStream bool
@@ -284,7 +297,7 @@
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // *chan struct{}
+ trfChan atomic.Value // chan struct{}
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -298,10 +311,10 @@
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
// controlbuf.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(*chan struct{})
+ ch, _ := c.trfChan.Load().(chan struct{})
if ch != nil {
select {
- case <-*ch:
+ case <-ch:
case <-c.done:
}
}
@@ -335,8 +348,7 @@
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- ch := make(chan struct{})
- c.trfChan.Store(&ch)
+ c.trfChan.Store(make(chan struct{}))
}
}
c.mu.Unlock()
@@ -377,9 +389,9 @@
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are removing the frame that put us over the
// threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(*chan struct{})
- close(*ch)
- c.trfChan.Store((*chan struct{})(nil))
+ ch := c.trfChan.Load().(chan struct{})
+ close(ch)
+ c.trfChan.Store((chan struct{})(nil))
}
c.transportResponseFrames--
}
@@ -395,7 +407,6 @@
select {
case <-c.ch:
case <-c.done:
- c.finish()
return nil, ErrConnClosing
}
}
@@ -420,6 +431,14 @@
hdr.onOrphaned(ErrConnClosing)
}
}
+ // In case throttle() is currently in flight, it needs to be unblocked.
+ // Otherwise, the transport may not close, since the transport is closed by
+ // the reader encountering the connection error.
+ ch, _ := c.trfChan.Load().(chan struct{})
+ if ch != nil {
+ close(ch)
+ }
+ c.trfChan.Store((chan struct{})(nil))
c.mu.Unlock()
}
@@ -505,7 +524,9 @@
// 1. When the connection is closed by some other known issue.
// 2. User closed the connection.
// 3. A graceful close of connection.
- infof("transport: loopyWriter.run returning. %v", err)
+ if logger.V(logLevel) {
+ logger.Infof("transport: loopyWriter.run returning. %v", err)
+ }
err = nil
}
}()
@@ -605,7 +626,9 @@
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
- warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+ }
return nil
}
// Case 1.A: Server is responding back with headers.
@@ -658,7 +681,9 @@
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
- warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+ }
}
}
var (
@@ -743,6 +768,27 @@
return nil
}
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+ if l.side == clientSide {
+ return errors.New("earlyAbortStream not handled on client")
+ }
+ // In case the caller forgets to set the http status, default to 200.
+ if eas.httpStatus == 0 {
+ eas.httpStatus = 200
+ }
+ headerFields := []hpack.HeaderField{
+ {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+ {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+ {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+ {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+ }
+
+ if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
if l.side == clientSide {
l.draining = true
@@ -781,6 +827,8 @@
return l.registerStreamHandler(i)
case *cleanupStream:
return l.cleanupStreamHandler(i)
+ case *earlyAbortStream:
+ return l.earlyAbortStreamHandler(i)
case *incomingGoAway:
return l.incomingGoAwayHandler(i)
case *dataFrame:
@@ -857,38 +905,45 @@
return false, nil
}
var (
- idx int
buf []byte
)
- if len(dataItem.h) != 0 { // data header has not been written out yet.
- buf = dataItem.h
- } else {
- idx = 1
- buf = dataItem.d
- }
- size := http2MaxFrameLen
- if len(buf) < size {
- size = len(buf)
- }
+ // Figure out the maximum size we can send
+ maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
str.state = waitingOnStreamQuota
return false, nil
- } else if strQuota < size {
- size = strQuota
+ } else if maxSize > strQuota {
+ maxSize = strQuota
+ }
+ if maxSize > int(l.sendQuota) { // connection-level flow control.
+ maxSize = int(l.sendQuota)
+ }
+ // Compute how much of the header and data we can send within quota and max frame length
+ hSize := min(maxSize, len(dataItem.h))
+ dSize := min(maxSize-hSize, len(dataItem.d))
+ if hSize != 0 {
+ if dSize == 0 {
+ buf = dataItem.h
+ } else {
+ // We can add some data to the gRPC message header to distribute bytes more equally across frames.
+ // Copy on the stack to avoid generating garbage.
+ var localBuf [http2MaxFrameLen]byte
+ copy(localBuf[:hSize], dataItem.h)
+ copy(localBuf[hSize:], dataItem.d[:dSize])
+ buf = localBuf[:hSize+dSize]
+ }
+ } else {
+ buf = dataItem.d
}
- if l.sendQuota < uint32(size) { // connection-level flow control.
- size = int(l.sendQuota)
- }
+ size := hSize + dSize
+
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && size == len(buf) {
- // buf contains either data or it contains header but data is empty.
- if idx == 1 || len(dataItem.d) == 0 {
- endStream = true
- }
+ if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
@@ -896,14 +951,10 @@
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
return false, err
}
- buf = buf[size:]
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
- if idx == 0 {
- dataItem.h = buf
- } else {
- dataItem.d = buf
- }
+ dataItem.h = dataItem.h[hSize:]
+ dataItem.d = dataItem.d[dSize:]
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
str.itl.dequeue()
@@ -924,3 +975,10 @@
}
return false, nil
}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
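The rewritten data handler above first clamps the frame size by the HTTP/2 max frame length, the stream-level quota, and the connection-level send quota, then packs as much of the gRPC message header (and, if room remains, the message body) as fits into one DATA frame. A small hedged sketch of that size computation, using made-up quota values:

```go
package main

import "fmt"

const http2MaxFrameLen = 16384

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// frameSplit returns how many header bytes and data bytes fit into the next
// DATA frame given the stream-level and connection-level quotas.
func frameSplit(hdrLen, dataLen, streamQuota, connQuota int) (hSize, dSize int) {
	maxSize := http2MaxFrameLen
	maxSize = min(maxSize, streamQuota)
	maxSize = min(maxSize, connQuota)
	hSize = min(maxSize, hdrLen)
	dSize = min(maxSize-hSize, dataLen)
	return hSize, dSize
}

func main() {
	// 5-byte gRPC message header, 40 KiB payload, generous stream quota,
	// but only 10 KiB of connection-level quota left.
	h, d := frameSplit(5, 40*1024, 64*1024, 10*1024)
	fmt.Println(h, d) // 5 10235
}
```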
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index f262edd..97198c5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -136,12 +136,10 @@
// newLimit updates the inflow window to a new value n.
// It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
f.mu.Lock()
- d := n - f.limit
f.limit = n
f.mu.Unlock()
- return d
}
func (f *inFlow) maybeAdjust(n uint32) uint32 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index fc44e97..1c3459c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -39,6 +39,7 @@
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -57,7 +58,7 @@
}
contentType := r.Header.Get("Content-Type")
// TODO: do we assume contentType is lowercase? we did before
- contentSubtype, validContentType := contentSubtype(contentType)
+ contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
if !validContentType {
return nil, errors.New("invalid gRPC request content-type")
}
@@ -140,9 +141,8 @@
stats stats.Handler
}
-func (ht *serverHandlerTransport) Close() error {
+func (ht *serverHandlerTransport) Close() {
ht.closeOnce.Do(ht.closeCloseChanOnce)
- return nil
}
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 1cc586f..f0c72d3 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -24,6 +24,8 @@
"io"
"math"
"net"
+ "net/http"
+ "path/filepath"
"strconv"
"strings"
"sync"
@@ -32,15 +34,18 @@
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
+ icredentials "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/grpcutil"
+ imetadata "google.golang.org/grpc/internal/metadata"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
@@ -57,7 +62,7 @@
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
- md interface{}
+ md metadata.MD
conn net.Conn // underlying communication channel
loopy *loopyWriter
remoteAddr net.Addr
@@ -112,6 +117,9 @@
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
+ // goAwayDebugMessage contains a detailed human readable string about a
+ // GoAway frame, useful for error messages.
+ goAwayDebugMessage string
// A condition variable used to signal when the keepalive goroutine should
// go dormant. The condition for dormancy is based on the number of active
// streams and the `PermitWithoutStream` keepalive client parameter. And
@@ -135,11 +143,34 @@
connectionID uint64
}
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+ address := addr.Addr
+ networkType, ok := networktype.Get(addr)
if fn != nil {
- return fn(ctx, addr)
+ // Special handling for unix scheme with custom dialer. Back in the day,
+ // we did not have a unix resolver and therefore targets with a unix
+ // scheme would end up using the passthrough resolver. So, users used a
+ // custom dialer in this case and expected the original dial target to
+ // be passed to the custom dialer. Now, we have a unix resolver. But if
+ // a custom dialer is specified, we want to retain the old behavior in
+ // terms of the address being passed to the custom dialer.
+ if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+ // Supported unix targets are either "unix://absolute-path" or
+ // "unix:relative-path".
+ if filepath.IsAbs(address) {
+ return fn(ctx, "unix://"+address)
+ }
+ return fn(ctx, "unix:"+address)
+ }
+ return fn(ctx, address)
}
- return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+ if !ok {
+ networkType, address = parseDialTarget(address)
+ }
+ if networkType == "tcp" && useProxy {
+ return proxyDial(ctx, address, grpcUA)
+ }
+ return (&net.Dialer{}).DialContext(ctx, networkType, address)
}
func isTemporary(err error) bool {
@@ -161,7 +192,7 @@
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@@ -170,7 +201,13 @@
}
}()
- conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
+ // gRPC, resolver, balancer etc. can specify arbitrary data in the
+ // Attributes field of resolver.Address, which is shoved into connectCtx
+ // and passed to the dialer and credential handshaker. This makes it possible for
+ // address specific arbitrary data to reach custom dialers and credential handshakers.
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+ conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
@@ -214,12 +251,34 @@
}
}
if transportCreds != nil {
- scheme = "https"
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+ rawConn := conn
+ // Pull the deadline from the connectCtx, which will be used for
+ // timeouts in the authentication protocol handshake. Can ignore the
+ // boolean as the deadline will return the zero value, which will make
+ // the conn not timeout on I/O operations.
+ deadline, _ := connectCtx.Deadline()
+ rawConn.SetDeadline(deadline)
+ conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
+ rawConn.SetDeadline(time.Time{})
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
+ for _, cd := range perRPCCreds {
+ if cd.RequireTransportSecurity() {
+ if ci, ok := authInfo.(interface {
+ GetCommonAuthInfo() credentials.CommonAuthInfo
+ }); ok {
+ secLevel := ci.GetCommonAuthInfo().SecurityLevel
+ if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+ return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+ }
+ }
+ }
+ }
isSecure = true
+ if transportCreds.Info().SecurityProtocol == "tls" {
+ scheme = "https"
+ }
}
dynamicWindow := true
icwz := int32(initialWindowSize)
@@ -238,7 +297,6 @@
ctxDone: ctx.Done(), // Cache Done chan.
cancel: cancel,
userAgent: opts.UserAgent,
- md: addr.Metadata,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@@ -266,6 +324,12 @@
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
}
+
+ if md, ok := addr.Metadata.(*metadata.MD); ok {
+ t.md = *md
+ } else if md := imetadata.Get(addr); md != nil {
+ t.md = md
+ }
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
@@ -302,12 +366,14 @@
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+ t.Close(err)
+ return nil, err
}
if n != len(clientPreface) {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ t.Close(err)
+ return nil, err
}
var ss []http2.Setting
@@ -325,14 +391,16 @@
}
err = t.framer.fr.WriteSettings(ss...)
if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+ t.Close(err)
+ return nil, err
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+ t.Close(err)
+ return nil, err
}
}
@@ -345,13 +413,14 @@
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
if err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
+ }
}
- // If it's a connection error, let reader goroutine handle it
- // since there might be data in the buffers.
- if _, ok := err.(net.Error); !ok {
- t.conn.Close()
- }
+ // Do not close the transport. Let reader goroutine handle it since
+ // there might be data in the buffers.
+ t.conn.Close()
+ t.controlBuf.finish()
close(t.writerDone)
}()
return t, nil
@@ -367,6 +436,7 @@
buf: newRecvBuffer(),
headerChan: make(chan struct{}),
contentSubtype: callHdr.ContentSubtype,
+ doneFunc: callHdr.DoneFunc,
}
s.wq = newWriteQuota(defaultWriteQuota, s.done)
s.requestRead = func(n int) {
@@ -406,7 +476,7 @@
Method: callHdr.Method,
AuthInfo: t.authInfo,
}
- ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+ ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
if err != nil {
return nil, err
@@ -425,7 +495,7 @@
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
if callHdr.PreviousAttempts > 0 {
@@ -440,7 +510,7 @@
// Send out timeout regardless its value. The server can detect timeout context by itself.
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := time.Until(dl)
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
}
for k, v := range authData {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
@@ -469,25 +539,23 @@
for _, vv := range added {
for i, v := range vv {
if i%2 == 0 {
- k = v
+ k = strings.ToLower(v)
continue
}
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
- headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
- if md, ok := t.md.(*metadata.MD); ok {
- for k, vv := range *md {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
+ for k, vv := range t.md {
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
return headerFields, nil
@@ -520,7 +588,7 @@
return nil, err
}
- return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
+ return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2.
@@ -537,8 +605,11 @@
// Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied.
if callCreds := callHdr.Creds; callCreds != nil {
- if !t.isSecure && callCreds.RequireTransportSecurity() {
- return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ if callCreds.RequireTransportSecurity() {
+ ri, _ := credentials.RequestInfoFromContext(ctx)
+ if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+ return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ }
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
@@ -554,13 +625,35 @@
return callAuthData, nil
}
+// NewStreamError wraps an error and reports additional information. Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire. However, there are two notable exceptions:
+//
+// 1. If the stream headers violate the max header list size allowed by the
+// server. In this case there is no reason to retry at all, as it is
+// assumed the RPC would continue to fail on subsequent attempts.
+// 2. If the credentials errored when requesting their headers. In this case,
+// it's possible a retry can fix the problem, but retrying transparently and
+// indefinitely is not appropriate, as the credentials would likely need I/O
+// before they could eventually succeed.
+type NewStreamError struct {
+ Err error
+
+ DoNotRetry bool
+ DoNotTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+ return e.Err.Error()
+}
+
// NewStream creates a stream and registers it into the transport as "active"
-// streams.
+// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
ctx = peer.NewContext(ctx, t.getPeer())
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
- return nil, err
+ return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
}
s := t.newStream(ctx, callHdr)
cleanup := func(err error) {
@@ -660,23 +753,23 @@
return true
}, hdr)
if err != nil {
- return nil, err
+ return nil, &NewStreamError{Err: err}
}
if success {
break
}
if hdrListSizeErr != nil {
- return nil, hdrListSizeErr
+ return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
}
firstTry = false
select {
case <-ch:
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
+ case <-ctx.Done():
+ return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
case <-t.goAway:
- return nil, errStreamDrain
+ return nil, &NewStreamError{Err: errStreamDrain}
case <-t.ctx.Done():
- return nil, ErrConnClosing
+ return nil, &NewStreamError{Err: ErrConnClosing}
}
}
if t.statsHandler != nil {
@@ -771,6 +864,9 @@
t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
// This will unblock write.
close(s.done)
+ if s.doneFunc != nil {
+ s.doneFunc()
+ }
}
// Close kicks off the shutdown process of the transport. This should be called
@@ -780,12 +876,12 @@
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
-func (t *http2Client) Close() error {
+func (t *http2Client) Close(err error) {
t.mu.Lock()
// Make sure we only Close once.
if t.state == closing {
t.mu.Unlock()
- return nil
+ return
}
// Call t.onClose before setting the state to closing to prevent the client
// from attempting to create new streams ASAP.
@@ -801,13 +897,25 @@
t.mu.Unlock()
t.controlBuf.finish()
t.cancel()
- err := t.conn.Close()
+ t.conn.Close()
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
+ // Append info about previous goaways if there were any, since this may be important
+ // for understanding the root cause for this connection to be closed.
+ _, goAwayDebugMessage := t.GetGoAwayReason()
+
+ var st *status.Status
+ if len(goAwayDebugMessage) > 0 {
+ st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+ err = st.Err()
+ } else {
+ st = status.New(codes.Unavailable, err.Error())
+ }
+
// Notify all active streams.
for _, s := range streams {
- t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
+ t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
if t.statsHandler != nil {
connEnd := &stats.ConnEnd{
@@ -815,7 +923,6 @@
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
- return err
}
// GracefulClose sets the state to draining, which prevents new streams from
@@ -834,7 +941,7 @@
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
- t.Close()
+ t.Close(ErrConnClosing)
return
}
t.controlBuf.put(&incomingGoAway{})
@@ -854,18 +961,10 @@
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
+ h: hdr,
+ d: data,
}
- if hdr != nil || data != nil { // If it's not an empty data frame.
- // Add some data to grpc message header so that we can equally
- // distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
- df.h, df.d = hdr, data
- // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
+ if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
return err
}
@@ -983,7 +1082,7 @@
}
// The server has closed the stream without sending trailers. Record that
// the read direction is closed, and set the status appropriately.
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+ if f.StreamEnded() {
t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
}
}
@@ -999,7 +1098,9 @@
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
- warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ }
statusCode = codes.Unknown
}
if statusCode == codes.Canceled {
@@ -1081,12 +1182,14 @@
return
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
- infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ if logger.V(logLevel) {
+ logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ }
}
id := f.LastStreamID
- if id > 0 && id%2 != 1 {
+ if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close()
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
return
}
// A client can receive multiple GoAways from the server (see
@@ -1104,7 +1207,7 @@
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
- t.Close()
+ t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
return
}
default:
@@ -1134,7 +1237,7 @@
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
- t.Close()
+ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
}
}
@@ -1150,12 +1253,17 @@
t.goAwayReason = GoAwayTooManyPings
}
}
+ if len(f.DebugData()) == 0 {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+ } else {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+ }
}
-func (t *http2Client) GetGoAwayReason() GoAwayReason {
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
t.mu.Lock()
defer t.mu.Unlock()
- return t.goAwayReason
+ return t.goAwayReason, t.goAwayDebugMessage
}
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -1182,35 +1290,128 @@
return
}
- state := &decodeState{}
- // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
- state.data.isGRPC = !initialHeader
- if err := state.decodeHeader(frame); err != nil {
- t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
+ // frame.Truncated is set to true when the framer detects that the current
+ // header list size hits the MaxHeaderListSize limit.
+ if frame.Truncated {
+ se := status.New(codes.Internal, "peer header list size exceeded limit")
+ t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+ return
+ }
+
+ var (
+ // If a gRPC Response-Headers has already been received, then it means
+ // that the peer is speaking gRPC and we are in gRPC mode.
+ isGRPC = !initialHeader
+ mdata = make(map[string][]string)
+ contentTypeErr = "malformed header: missing HTTP content-type"
+ grpcMessage string
+ statusGen *status.Status
+ recvCompress string
+ httpStatusCode *int
+ httpStatusErr string
+ rawStatusCode = codes.Unknown
+ // headerError is set if an error is encountered while parsing the headers
+ headerError string
+ )
+
+ if initialHeader {
+ httpStatusErr = "malformed header: missing HTTP status"
+ }
+
+ for _, hf := range frame.Fields {
+ switch hf.Name {
+ case "content-type":
+ if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+ contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+ break
+ }
+ contentTypeErr = ""
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ isGRPC = true
+ case "grpc-encoding":
+ recvCompress = hf.Value
+ case "grpc-status":
+ code, err := strconv.ParseInt(hf.Value, 10, 32)
+ if err != nil {
+ se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ rawStatusCode = codes.Code(uint32(code))
+ case "grpc-message":
+ grpcMessage = decodeGrpcMessage(hf.Value)
+ case "grpc-status-details-bin":
+ var err error
+ statusGen, err = decodeGRPCStatusDetails(hf.Value)
+ if err != nil {
+ headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
+ }
+ case ":status":
+ if hf.Value == "200" {
+ httpStatusErr = ""
+ statusCode := 200
+ httpStatusCode = &statusCode
+ break
+ }
+
+ c, err := strconv.ParseInt(hf.Value, 10, 32)
+ if err != nil {
+ se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ statusCode := int(c)
+ httpStatusCode = &statusCode
+
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
+ if err != nil {
+ headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+ logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ if !isGRPC || httpStatusErr != "" {
+ var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+ if httpStatusCode != nil {
+ var ok bool
+ code, ok = HTTPStatusConvTab[*httpStatusCode]
+ if !ok {
+ code = codes.Unknown
+ }
+ }
+ var errs []string
+ if httpStatusErr != "" {
+ errs = append(errs, httpStatusErr)
+ }
+ if contentTypeErr != "" {
+ errs = append(errs, contentTypeErr)
+ }
+ // Verify the HTTP response is a 200.
+ se := status.New(code, strings.Join(errs, "; "))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+
+ if headerError != "" {
+ se := status.New(codes.Internal, headerError)
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
isHeader := false
- defer func() {
- if t.statsHandler != nil {
- if isHeader {
- inHeader := &stats.InHeader{
- Client: true,
- WireLength: int(frame.Header().Length),
- Header: s.header.Copy(),
- Compression: s.recvCompress,
- }
- t.statsHandler.HandleRPC(s.ctx, inHeader)
- } else {
- inTrailer := &stats.InTrailer{
- Client: true,
- WireLength: int(frame.Header().Length),
- Trailer: s.trailer.Copy(),
- }
- t.statsHandler.HandleRPC(s.ctx, inTrailer)
- }
- }
- }()
// If headerChan hasn't been closed yet
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
@@ -1221,9 +1422,9 @@
// These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed
// headerChan which we'll close after setting this.
- s.recvCompress = state.data.encoding
- if len(state.data.mdata) > 0 {
- s.header = state.data.mdata
+ s.recvCompress = recvCompress
+ if len(mdata) > 0 {
+ s.header = mdata
}
} else {
// HEADERS frame block carries a Trailers-Only.
@@ -1232,13 +1433,36 @@
close(s.headerChan)
}
+ if t.statsHandler != nil {
+ if isHeader {
+ inHeader := &stats.InHeader{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ Header: metadata.MD(mdata).Copy(),
+ Compression: s.recvCompress,
+ }
+ t.statsHandler.HandleRPC(s.ctx, inHeader)
+ } else {
+ inTrailer := &stats.InTrailer{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ Trailer: metadata.MD(mdata).Copy(),
+ }
+ t.statsHandler.HandleRPC(s.ctx, inTrailer)
+ }
+ }
+
if !endStream {
return
}
+ if statusGen == nil {
+ statusGen = status.New(rawStatusCode, grpcMessage)
+ }
+
// if client received END_STREAM from server while stream was still active, send RST_STREAM
rst := s.getState() == streamActive
- t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
+ t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
}
// reader runs as a separate goroutine in charge of reading data from network
@@ -1252,7 +1476,8 @@
// Check the validity of server preface.
frame, err := t.framer.fr.ReadFrame()
if err != nil {
- t.Close() // this kicks off resetTransport, so must be last before return
+ err = connectionErrorf(true, err, "error reading server preface: %v", err)
+ t.Close(err) // this kicks off resetTransport, so must be last before return
return
}
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
@@ -1261,7 +1486,8 @@
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
- t.Close() // this kicks off resetTransport, so must be last before return
+ // this kicks off resetTransport, so must be last before return
+ t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
return
}
t.onPrefaceReceipt()
@@ -1285,13 +1511,19 @@
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
- msg := t.framer.fr.ErrorDetail().Error()
+ errorDetail := t.framer.fr.ErrorDetail()
+ var msg string
+ if errorDetail != nil {
+ msg = errorDetail.Error()
+ } else {
+ msg = "received invalid frame"
+ }
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
} else {
// Transport error.
- t.Close()
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
return
}
}
@@ -1311,7 +1543,9 @@
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
- errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
}
}
}
@@ -1323,7 +1557,7 @@
return b
}
-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
@@ -1348,7 +1582,7 @@
continue
}
if outstandingPing && timeoutLeft <= 0 {
- t.Close()
+ t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
return
}
t.mu.Lock()
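NewStream now wraps every failure in *NewStreamError so callers can distinguish errors that should never be retried (header-list-size violations) from ones that should skip only the transparent retry (per-RPC credential failures). The internal transport package is not importable, so the sketch below uses a local copy of the type to show how a caller might branch on those flags; the retry decisions themselves are illustrative assumptions, not grpc-go's actual policy.

```go
package main

import (
	"errors"
	"fmt"
)

// newStreamError is a local stand-in for transport.NewStreamError.
type newStreamError struct {
	Err                   error
	DoNotRetry            bool
	DoNotTransparentRetry bool
}

func (e *newStreamError) Error() string { return e.Err.Error() }

// retryDecision is a hypothetical policy layered on top of the flags.
func retryDecision(err error) string {
	var nse *newStreamError
	if !errors.As(err, &nse) {
		return "not a stream-creation error; normal retry policy applies"
	}
	switch {
	case nse.DoNotRetry:
		return "fail the RPC: retrying would keep hitting the same limit"
	case nse.DoNotTransparentRetry:
		return "skip transparent retry, but configured retry policies may still apply"
	default:
		return "nothing reached the wire; transparent retry is safe"
	}
}

func main() {
	err := &newStreamError{Err: fmt.Errorf("header list size exceeded"), DoNotRetry: true}
	fmt.Println(retryDecision(err))
}
```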
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index fa33ffb..2c6eaf0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -26,6 +26,7 @@
"io"
"math"
"net"
+ "net/http"
"strconv"
"sync"
"sync/atomic"
@@ -34,10 +35,10 @@
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
@@ -72,7 +73,6 @@
writerDone chan struct{} // sync point to enable testing.
remoteAddr net.Addr
localAddr net.Addr
- maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
framer *framer
@@ -101,11 +101,11 @@
mu sync.Mutex // guard the following
- // drainChan is initialized when drain(...) is called the first time.
+ // drainChan is initialized when Drain() is called the first time.
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
// Then an independent goroutine will be launched to later send the second GoAway.
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
- // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
+ // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
// already underway.
drainChan chan struct{}
state transportState
@@ -122,11 +122,37 @@
bufferPool *bufferPool
connectionID uint64
+
+ // maxStreamMu guards the maximum stream ID
+ // This lock may not be taken if mu is already held.
+ maxStreamMu sync.Mutex
+ maxStreamID uint32 // max stream ID ever seen
}
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+// NewServerTransport creates a http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+ var authInfo credentials.AuthInfo
+ rawConn := conn
+ if config.Credentials != nil {
+ var err error
+ conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+ if err != nil {
+ // ErrConnDispatched means that the connection was dispatched away
+ // from gRPC; those connections should be left open. io.EOF means
+ // the connection was closed before handshaking completed, which can
+ // happen naturally from probers. Return these errors directly.
+ if err == credentials.ErrConnDispatched || err == io.EOF {
+ return nil, err
+ }
+ return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+ }
+ }
writeBufSize := config.WriteBufferSize
readBufSize := config.ReadBufferSize
maxHeaderListSize := defaultServerMaxHeaderListSize
@@ -209,14 +235,15 @@
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
+
done := make(chan struct{})
t := &http2Server{
- ctx: context.Background(),
+ ctx: setConnection(context.Background(), rawConn),
done: done,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
- authInfo: config.AuthInfo,
+ authInfo: authInfo,
framer: framer,
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
@@ -265,6 +292,14 @@
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
+ // In deployments where a gRPC server runs behind a cloud load balancer
+ // which performs regular TCP level health checks, the connection is
+ // closed immediately by the latter. Returning io.EOF here allows the
+ // grpc server implementation to recognize this scenario and suppress
+ // logging to reduce spam.
+ if err == io.EOF {
+ return nil, io.EOF
+ }
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
if !bytes.Equal(preface, clientPreface) {
@@ -289,9 +324,12 @@
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
+ }
}
t.conn.Close()
+ t.controlBuf.finish()
close(t.writerDone)
}()
go t.keepalive()
@@ -300,38 +338,144 @@
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+ // Acquire max stream ID lock for entire duration
+ t.maxStreamMu.Lock()
+ defer t.maxStreamMu.Unlock()
+
streamID := frame.Header().StreamID
- state := &decodeState{
- serverSide: true,
- }
- if err := state.decodeHeader(frame); err != nil {
- if se, ok := status.FromError(err); ok {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: statusCodeConvTab[se.Code()],
- onWrite: func() {},
- })
- }
+
+ // frame.Truncated is set to true when framer detects that the current header
+ // list size hits MaxHeaderListSize limit.
+ if frame.Truncated {
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeFrameSize,
+ onWrite: func() {},
+ })
return false
}
+ if streamID%2 != 1 || streamID <= t.maxStreamID {
+ // illegal gRPC stream id.
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+ }
+ return true
+ }
+ t.maxStreamID = streamID
+
buf := newRecvBuffer()
s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- recvCompress: state.data.encoding,
- method: state.data.method,
- contentSubtype: state.data.contentSubtype,
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
+ var (
+ // If a gRPC Response-Headers has already been received, then it means
+ // that the peer is speaking gRPC and we are in gRPC mode.
+ isGRPC = false
+ mdata = make(map[string][]string)
+ httpMethod string
+ // headerError is set if an error is encountered while parsing the headers
+ headerError bool
+
+ timeoutSet bool
+ timeout time.Duration
+ )
+
+ for _, hf := range frame.Fields {
+ switch hf.Name {
+ case "content-type":
+ contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+ if !validContentType {
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ s.contentSubtype = contentSubtype
+ isGRPC = true
+ case "grpc-encoding":
+ s.recvCompress = hf.Value
+ case ":method":
+ httpMethod = hf.Value
+ case ":path":
+ s.method = hf.Value
+ case "grpc-timeout":
+ timeoutSet = true
+ var err error
+ if timeout, err = decodeTimeout(hf.Value); err != nil {
+ headerError = true
+ }
+ // "Transports must consider requests containing the Connection header
+ // as malformed." - A41
+ case "connection":
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
+ }
+ headerError = true
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
+ if err != nil {
+ headerError = true
+ logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ // "If multiple Host headers or multiple :authority headers are present, the
+ // request must be rejected with an HTTP status code 400 as required by Host
+ // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+ // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
+ // error, this takes precedence over a client not speaking gRPC.
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+ errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+ if logger.V(logLevel) {
+ logger.Errorf("transport: %v", errMsg)
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 400,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.Internal, errMsg),
+ })
+ return false
+ }
+
+ if !isGRPC || headerError {
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeProtocol,
+ onWrite: func() {},
+ })
+ return false
+ }
+
+ // "If :authority is missing, Host must be renamed to :authority." - A41
+ if len(mdata[":authority"]) == 0 {
+ // No-op if host isn't present, no eventual :authority header is a valid
+ // RPC.
+ if host, ok := mdata["host"]; ok {
+ mdata[":authority"] = host
+ delete(mdata, "host")
+ }
+ } else {
+ // "If :authority is present, Host must be discarded" - A41
+ delete(mdata, "host")
+ }
+
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
- if state.data.timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
+ if timeoutSet {
+ s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
@@ -344,31 +488,13 @@
}
s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context.
- if len(state.data.mdata) > 0 {
- s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
- }
- if state.data.statsTags != nil {
- s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
- }
- if state.data.statsTrace != nil {
- s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
- }
- if t.inTapHandle != nil {
- var err error
- info := &tap.Info{
- FullMethodName: state.data.method,
+ if len(mdata) > 0 {
+ s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+ if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+ s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
}
- s.ctx, err = t.inTapHandle(s.ctx, info)
- if err != nil {
- warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
- t.controlBuf.put(&cleanupStream{
- streamID: s.id,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- s.cancel()
- return false
+ if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+ s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
}
}
t.mu.Lock()
@@ -388,14 +514,40 @@
s.cancel()
return false
}
- if streamID%2 != 1 || streamID <= t.maxStreamID {
+ if httpMethod != http.MethodPost {
t.mu.Unlock()
- // illegal gRPC stream id.
- errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+ if logger.V(logLevel) {
+ logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
+ }
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeProtocol,
+ onWrite: func() {},
+ })
s.cancel()
- return true
+ return false
}
- t.maxStreamID = streamID
+ if t.inTapHandle != nil {
+ var err error
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+ t.mu.Unlock()
+ if logger.V(logLevel) {
+ logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
+ }
+ stat, ok := status.FromError(err)
+ if !ok {
+ stat = status.New(codes.PermissionDenied, err.Error())
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 200,
+ streamID: s.id,
+ contentSubtype: s.contentSubtype,
+ status: stat,
+ })
+ return false
+ }
+ }
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
@@ -417,7 +569,7 @@
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
- Header: metadata.MD(state.data.mdata).Copy(),
+ Header: metadata.MD(mdata).Copy(),
}
t.stats.HandleRPC(s.ctx, inHeader)
}
@@ -454,7 +606,9 @@
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
- warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+ }
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
@@ -474,7 +628,9 @@
t.Close()
return
}
- warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ if logger.V(logLevel) {
+ logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ }
t.Close()
return
}
@@ -497,7 +653,9 @@
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
- errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ }
}
}
}
@@ -599,6 +757,10 @@
if !ok {
return
}
+ if s.getState() == streamReadDone {
+ t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+ return
+ }
if size > 0 {
if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@@ -619,7 +781,7 @@
s.write(recvMsg{buffer: buffer})
}
}
- if f.Header().Flags.Has(http2.FlagDataEndStream) {
+ if f.StreamEnded() {
// Received the end of stream from the client.
s.compareAndSwapState(streamActive, streamReadDone)
s.write(recvMsg{err: io.EOF})
@@ -719,7 +881,9 @@
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
- errorf("transport: Got too many pings from the client, closing the connection.")
+ if logger.V(logLevel) {
+ logger.Errorf("transport: Got too many pings from the client, closing the connection.")
+ }
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
}
}
@@ -752,7 +916,9 @@
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ if logger.V(logLevel) {
+ logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ }
return false
}
}
@@ -789,7 +955,7 @@
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
if s.sendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
@@ -839,7 +1005,7 @@
}
} else { // Send a trailer only response.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
}
}
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
@@ -849,7 +1015,7 @@
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
- grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+ logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
} else {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
@@ -909,13 +1075,6 @@
return ContextErr(s.ctx.Err())
}
}
- // Add some data to header frame so that we can equally distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
df := &dataFrame{
streamID: s.id,
h: hdr,
@@ -977,17 +1136,19 @@
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
- t.drain(http2.ErrCodeNo, []byte{})
+ t.Drain()
return
}
idleTimer.Reset(val)
case <-ageTimer.C:
- t.drain(http2.ErrCodeNo, []byte{})
+ t.Drain()
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-ageTimer.C:
// Close the connection after grace period.
- infof("transport: closing server transport due to maximum connection age.")
+ if logger.V(logLevel) {
+ logger.Infof("transport: closing server transport due to maximum connection age.")
+ }
t.Close()
case <-t.done:
}
@@ -1004,7 +1165,9 @@
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
- infof("transport: closing server transport due to idleness.")
+ if logger.V(logLevel) {
+ logger.Infof("transport: closing server transport due to idleness.")
+ }
t.Close()
return
}
@@ -1032,11 +1195,11 @@
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() error {
+func (t *http2Server) Close() {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return errors.New("transport: Close() was already called")
+ return
}
t.state = closing
streams := t.activeStreams
@@ -1044,7 +1207,9 @@
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
- err := t.conn.Close()
+ if err := t.conn.Close(); err != nil && logger.V(logLevel) {
+ logger.Infof("transport: error closing conn during Close: %v", err)
+ }
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
@@ -1056,7 +1221,6 @@
connEnd := &stats.ConnEnd{}
t.stats.HandleConn(t.ctx, connEnd)
}
- return err
}
// deleteStream deletes the stream s from transport's active streams.
@@ -1121,17 +1285,13 @@
}
func (t *http2Server) Drain() {
- t.drain(http2.ErrCodeNo, []byte{})
-}
-
-func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainChan != nil {
return
}
t.drainChan = make(chan struct{})
- t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
}
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@@ -1139,20 +1299,23 @@
// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.maxStreamMu.Lock()
t.mu.Lock()
if t.state == closing { // TODO(mmukhi): This seems unnecessary.
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
// The transport is closing.
return false, ErrConnClosing
}
- sid := t.maxStreamID
if !g.headsUp {
// Stop accepting more streams now.
t.state = draining
+ sid := t.maxStreamID
if len(t.activeStreams) == 0 {
g.closeConn = true
}
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
@@ -1165,6 +1328,7 @@
return true, nil
}
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
// Follow that with a ping and wait for the ack to come back or a timer
// to expire. During this time accept new streams since they might have
@@ -1249,3 +1413,18 @@
j := grpcrand.Int63n(2*r) - r
return time.Duration(j)
}
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+ conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+ return conn
+}
+
+// SetConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func setConnection(ctx context.Context, conn net.Conn) context.Context {
+ return context.WithValue(ctx, connectionKey{}, conn)
+}
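Reviewer note: the two helpers added above use the standard Go context-key pattern — an unexported struct key type guarantees no other package can collide with or overwrite the stored connection. A minimal, self-contained sketch of the same pattern (illustrative only; user code cannot import the internal transport package, and the names below are hypothetical):

package main

import (
	"context"
	"fmt"
	"net"
)

// connKey is an unexported key type, so only this package can read or write
// the value; this mirrors how the transport stashes the raw net.Conn.
type connKey struct{}

func withConn(ctx context.Context, c net.Conn) context.Context {
	return context.WithValue(ctx, connKey{}, c)
}

func connFrom(ctx context.Context) net.Conn {
	c, _ := ctx.Value(connKey{}).(net.Conn)
	return c
}

func main() {
	// A connected in-memory pipe stands in for the accepted server connection.
	c1, c2 := net.Pipe()
	defer c1.Close()
	defer c2.Close()

	ctx := withConn(context.Background(), c1)
	if c := connFrom(ctx); c != nil {
		fmt.Println("remote addr:", c.RemoteAddr())
	}
}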
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 8f5f334..d8247bc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -27,6 +27,7 @@
"math"
"net"
"net/http"
+ "net/url"
"strconv"
"strings"
"time"
@@ -37,6 +38,7 @@
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
@@ -50,7 +52,7 @@
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
- baseContentType = "application/grpc"
+
)
var (
@@ -71,13 +73,6 @@
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
http2.ErrCodeHTTP11Required: codes.Internal,
}
- statusCodeConvTab = map[codes.Code]http2.ErrCode{
- codes.Internal: http2.ErrCodeInternal,
- codes.Canceled: http2.ErrCodeCancel,
- codes.Unavailable: http2.ErrCodeRefusedStream,
- codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
- codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
- }
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
HTTPStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
@@ -97,54 +92,9 @@
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
+ logger = grpclog.Component("transport")
)
-type parsedHeaderData struct {
- encoding string
- // statusGen caches the stream status received from the trailer the server
- // sent. Client side only. Do not access directly. After all trailers are
- // parsed, use the status method to retrieve the status.
- statusGen *status.Status
- // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
- // intended for direct access outside of parsing.
- rawStatusCode *int
- rawStatusMsg string
- httpStatus *int
- // Server side only fields.
- timeoutSet bool
- timeout time.Duration
- method string
- // key-value metadata map from the peer.
- mdata map[string][]string
- statsTags []byte
- statsTrace []byte
- contentSubtype string
-
- // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
- //
- // We are in gRPC mode (peer speaking gRPC) if:
- // * We are client side and have already received a HEADER frame that indicates gRPC peer.
- // * The header contains valid a content-type, i.e. a string starts with "application/grpc"
- // And we should handle error specific to gRPC.
- //
- // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
- // are in HTTP fallback mode, and should handle error specific to HTTP.
- isGRPC bool
- grpcErr error
- httpErr error
- contentTypeErr string
-}
-
-// decodeState configures decoding criteria and records the decoded data.
-type decodeState struct {
- // whether decoding on server side or not
- serverSide bool
-
- // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
- // frame once decodeHeader function has been invoked and returned.
- data parsedHeaderData
-}
-
// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
@@ -182,54 +132,6 @@
}
}
-// contentSubtype returns the content-subtype for the given content-type. The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
- if contentType == baseContentType {
- return "", true
- }
- if !strings.HasPrefix(contentType, baseContentType) {
- return "", false
- }
- // guaranteed since != baseContentType and has baseContentType prefix
- switch contentType[len(baseContentType)] {
- case '+', ';':
- // this will return true for "application/grpc+" or "application/grpc;"
- // which the previous validContentType function tested to be valid, so we
- // just say that no content-subtype is specified in this case
- return contentType[len(baseContentType)+1:], true
- default:
- return "", false
- }
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
- if contentSubtype == "" {
- return baseContentType
- }
- return baseContentType + "+" + contentSubtype
-}
-
-func (d *decodeState) status() *status.Status {
- if d.data.statusGen == nil {
- // No status-details were provided; generate status using code/msg.
- d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
- }
- return d.data.statusGen
-}
-
const binHdrSuffix = "-bin"
func encodeBinHeader(v []byte) string {
@@ -259,164 +161,16 @@
return v, nil
}
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
- // frame.Truncated is set to true when framer detects that the current header
- // list size hits MaxHeaderListSize limit.
- if frame.Truncated {
- return status.Error(codes.Internal, "peer header list size exceeded limit")
+func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
+ v, err := decodeBinHeader(rawDetails)
+ if err != nil {
+ return nil, err
}
-
- for _, hf := range frame.Fields {
- d.processHeaderField(hf)
+ st := &spb.Status{}
+ if err = proto.Unmarshal(v, st); err != nil {
+ return nil, err
}
-
- if d.data.isGRPC {
- if d.data.grpcErr != nil {
- return d.data.grpcErr
- }
- if d.serverSide {
- return nil
- }
- if d.data.rawStatusCode == nil && d.data.statusGen == nil {
- // gRPC status doesn't exist.
- // Set rawStatusCode to be unknown and return nil error.
- // So that, if the stream has ended this Unknown status
- // will be propagated to the user.
- // Otherwise, it will be ignored. In which case, status from
- // a later trailer, that has StreamEnded flag set, is propagated.
- code := int(codes.Unknown)
- d.data.rawStatusCode = &code
- }
- return nil
- }
-
- // HTTP fallback mode
- if d.data.httpErr != nil {
- return d.data.httpErr
- }
-
- var (
- code = codes.Internal // when header does not include HTTP status, return INTERNAL
- ok bool
- )
-
- if d.data.httpStatus != nil {
- code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
- if !ok {
- code = codes.Unknown
- }
- }
-
- return status.Error(code, d.constructHTTPErrMsg())
-}
-
-// constructErrMsg constructs error message to be returned in HTTP fallback mode.
-// Format: HTTP status code and its corresponding message + content-type error message.
-func (d *decodeState) constructHTTPErrMsg() string {
- var errMsgs []string
-
- if d.data.httpStatus == nil {
- errMsgs = append(errMsgs, "malformed header: missing HTTP status")
- } else {
- errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
- }
-
- if d.data.contentTypeErr == "" {
- errMsgs = append(errMsgs, "transport: missing content-type field")
- } else {
- errMsgs = append(errMsgs, d.data.contentTypeErr)
- }
-
- return strings.Join(errMsgs, "; ")
-}
-
-func (d *decodeState) addMetadata(k, v string) {
- if d.data.mdata == nil {
- d.data.mdata = make(map[string][]string)
- }
- d.data.mdata[k] = append(d.data.mdata[k], v)
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
- switch f.Name {
- case "content-type":
- contentSubtype, validContentType := contentSubtype(f.Value)
- if !validContentType {
- d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
- return
- }
- d.data.contentSubtype = contentSubtype
- // TODO: do we want to propagate the whole content-type in the metadata,
- // or come up with a way to just propagate the content-subtype if it was set?
- // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
- // in the metadata?
- d.addMetadata(f.Name, f.Value)
- d.data.isGRPC = true
- case "grpc-encoding":
- d.data.encoding = f.Value
- case "grpc-status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
- return
- }
- d.data.rawStatusCode = &code
- case "grpc-message":
- d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
- case "grpc-status-details-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- return
- }
- s := &spb.Status{}
- if err := proto.Unmarshal(v, s); err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- return
- }
- d.data.statusGen = status.FromProto(s)
- case "grpc-timeout":
- d.data.timeoutSet = true
- var err error
- if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
- }
- case ":path":
- d.data.method = f.Value
- case ":status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
- return
- }
- d.data.httpStatus = &code
- case "grpc-tags-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
- return
- }
- d.data.statsTags = v
- d.addMetadata(f.Name, string(v))
- case "grpc-trace-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
- return
- }
- d.data.statsTrace = v
- d.addMetadata(f.Name, string(v))
- default:
- if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
- break
- }
- v, err := decodeMetadataHeader(f.Name, f.Value)
- if err != nil {
- errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
- return
- }
- d.addMetadata(f.Name, v)
- }
+ return status.FromProto(st), nil
}
type timeoutUnit uint8
@@ -449,41 +203,6 @@
return
}
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and round-up the result. Note that this is
-// equivalent to (d+r-1)/r but has less chance to overflow.
-func div(d, r time.Duration) int64 {
- if m := d % r; m > 0 {
- return int64(d/r + 1)
- }
- return int64(d / r)
-}
-
-// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
-func encodeTimeout(t time.Duration) string {
- if t <= 0 {
- return "0n"
- }
- if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "n"
- }
- if d := div(t, time.Microsecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "u"
- }
- if d := div(t, time.Millisecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "m"
- }
- if d := div(t, time.Second); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "S"
- }
- if d := div(t, time.Minute); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "M"
- }
- // Note that maxTimeoutValue * time.Hour > MaxInt64.
- return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
func decodeTimeout(s string) (time.Duration, error) {
size := len(s)
if size < 2 {
@@ -675,3 +394,31 @@
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
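Reviewer note: parseDialTarget special-cases "unix:addr" (no slash after the colon) before falling back to url.Parse, because the two URL-style forms put the socket path in different fields. A quick standalone illustration of that net/url behavior (not part of the vendored code):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "unix:///abs/path": the path lands in URL.Path and Host is empty.
	u, _ := url.Parse("unix:///tmp/app.sock")
	fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)

	// "unix://name": the name lands in URL.Host and Path is empty, which is
	// why parseDialTarget falls back to t.Host when t.Path is "".
	u, _ = url.Parse("unix://app.sock")
	fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)
}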
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
deleted file mode 100644
index 879df80..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Infof(format, args...)
- }
-}
-
-func warningf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Warningf(format, args...)
- }
-}
-
-func errorf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Errorf(format, args...)
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 0000000..c11b527
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. Attribute of a resolver.Address.
+package networktype
+
+import (
+ "google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+ address.Attributes = address.Attributes.WithValue(key, networkType)
+ return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+ v := address.Attributes.Value(key)
+ if v == nil {
+ return "", false
+ }
+ return v.(string), true
+}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
similarity index 71%
rename from vendor/google.golang.org/grpc/proxy.go
rename to vendor/google.golang.org/grpc/internal/transport/proxy.go
index f8f69bf..4159619 100644
--- a/vendor/google.golang.org/grpc/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -16,13 +16,12 @@
*
*/
-package grpc
+package transport
import (
"bufio"
"context"
"encoding/base64"
- "errors"
"fmt"
"io"
"net"
@@ -34,13 +33,11 @@
const proxyAuthHeaderKey = "Proxy-Authorization"
var (
- // errDisabled indicates that proxy is disabled for the address.
- errDisabled = errors.New("proxy is disabled for the address")
// The following variable will be overwritten in the tests.
httpProxyFromEnvironment = http.ProxyFromEnvironment
)
-func mapAddress(ctx context.Context, address string) (*url.URL, error) {
+func mapAddress(address string) (*url.URL, error) {
req := &http.Request{
URL: &url.URL{
Scheme: "https",
@@ -51,9 +48,6 @@
if err != nil {
return nil, err
}
- if url == nil {
- return nil, errDisabled
- }
return url, nil
}
@@ -76,7 +70,7 @@
return base64.StdEncoding.EncodeToString([]byte(auth))
}
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
@@ -115,32 +109,28 @@
return &bufConn{Conn: conn, r: r}, nil
}
-// newProxyDialer returns a dialer that connects to proxy first if necessary.
-// The returned dialer checks if a proxy is necessary, dial to the proxy with the
-// provided dialer, does HTTP CONNECT handshake and returns the connection.
-func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
- return func(ctx context.Context, addr string) (conn net.Conn, err error) {
- var newAddr string
- proxyURL, err := mapAddress(ctx, addr)
- if err != nil {
- if err != errDisabled {
- return nil, err
- }
- newAddr = addr
- } else {
- newAddr = proxyURL.Host
- }
+// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
+// is necessary, dials, does the HTTP CONNECT handshake, and returns the
+// connection.
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+ newAddr := addr
+ proxyURL, err := mapAddress(addr)
+ if err != nil {
+ return nil, err
+ }
+ if proxyURL != nil {
+ newAddr = proxyURL.Host
+ }
- conn, err = dialer(ctx, newAddr)
- if err != nil {
- return
- }
- if proxyURL != nil {
- // proxy is disabled if proxyURL is nil.
- conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
- }
+ conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+ if err != nil {
return
}
+ if proxyURL != nil {
+ // proxy is disabled if proxyURL is nil.
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ }
+ return
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
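Reviewer note: the rewrite above can drop the errDisabled sentinel because httpProxyFromEnvironment (http.ProxyFromEnvironment) already returns a nil URL and a nil error when no proxy applies, so the nil check in proxyDial is sufficient. A small standalone sketch of that standard-library contract (assumption: no proxy environment variables set):

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// ProxyFromEnvironment consults HTTPS_PROXY/HTTP_PROXY/NO_PROXY.
	// With none of them set it returns (nil, nil), i.e. "no proxy",
	// which is why proxyDial only needs the proxyURL != nil check.
	req := &http.Request{URL: &url.URL{Scheme: "https", Host: "example.com:443"}}
	proxyURL, err := http.ProxyFromEnvironment(req)
	fmt.Println(proxyURL, err)
}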
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index a30da9e..d3bf65b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -30,16 +30,20 @@
"net"
"sync"
"sync/atomic"
+ "time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
+const logLevel = 2
+
type bufferPool struct {
pool sync.Pool
}
@@ -238,6 +242,7 @@
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
+ doneFunc func() // invoked at the end of stream on client side.
ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
method string // the associated RPC method of the stream
recvCompress string
@@ -514,7 +519,8 @@
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
MaxStreams uint32
- AuthInfo credentials.AuthInfo
+ ConnectionTimeout time.Duration
+ Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
@@ -528,12 +534,6 @@
HeaderTableSize *uint32
}
-// NewServerTransport creates a ServerTransport with conn or non-nil error
-// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
- return newHTTP2Server(conn, config)
-}
-
// ConnectOptions covers all relevant options for communicating with the server.
type ConnectOptions struct {
// UserAgent is the application user agent.
@@ -566,19 +566,14 @@
ChannelzParentID int64
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
-}
-
-// TargetInfo contains the information of the target such as network address and metadata.
-type TargetInfo struct {
- Addr string
- Metadata interface{}
- Authority string
+ // UseProxy specifies if a proxy should be used.
+ UseProxy bool
}
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
- return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
}
// Options provides additional hints and information for message
@@ -613,6 +608,8 @@
ContentSubtype string
PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+ DoneFunc func() // called when the stream is finished
}
// ClientTransport is the common interface for all gRPC client-side transport
@@ -621,7 +618,7 @@
// Close tears down this transport. Once it returns, the transport
// should not be accessed any more. The caller must make sure this
// is called only once.
- Close() error
+ Close(err error)
// GracefulClose starts to tear down the transport: the transport will stop
// accepting new RPCs and NewStream will return error. Once all streams are
@@ -655,8 +652,9 @@
// HTTP/2).
GoAway() <-chan struct{}
- // GetGoAwayReason returns the reason why GoAway frame was received.
- GetGoAwayReason() GoAwayReason
+ // GetGoAwayReason returns the reason why GoAway frame was received, along
+ // with a human readable string with debug info.
+ GetGoAwayReason() (GoAwayReason, string)
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
@@ -692,7 +690,7 @@
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
- Close() error
+ Close()
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
new file mode 100644
index 0000000..e8b4927
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package internal
+
+import (
+ "google.golang.org/grpc/attributes"
+ "google.golang.org/grpc/resolver"
+)
+
+// handshakeClusterNameKey is the type used as the key to store cluster name in
+// the Attributes field of resolver.Address.
+type handshakeClusterNameKey struct{}
+
+// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
+// is updated with the cluster name.
+func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
+ return addr
+}
+
+// GetXDSHandshakeClusterName returns cluster name stored in attr.
+func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
+ v := attr.Value(handshakeClusterNameKey{})
+ name, ok := v.(string)
+ return name, ok
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index cf6d1b9..3604c78 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -75,13 +75,9 @@
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
md := MD{}
- var key string
- for i, s := range kv {
- if i%2 == 0 {
- key = strings.ToLower(s)
- continue
- }
- md[key] = append(md[key], s)
+ for i := 0; i < len(kv); i += 2 {
+ key := strings.ToLower(kv[i])
+ md[key] = append(md[key], kv[i+1])
}
return md
}
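Reviewer note: behavior is unchanged by the loop rewrite — Pairs still lower-cases keys and appends values for repeated keys in argument order. A short example against the public API:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("Route-ID", "a", "route-id", "b")
	fmt.Println(md["route-id"]) // [a b] — keys are lower-cased, values appended in order
}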
@@ -97,12 +93,16 @@
}
// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
func (md MD) Get(k string) []string {
k = strings.ToLower(k)
return md[k]
}
// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
func (md MD) Set(k string, vals ...string) {
if len(vals) == 0 {
return
@@ -111,7 +111,10 @@
md[k] = vals
}
-// Append adds the values to key k, not overwriting what was already stored at that key.
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
func (md MD) Append(k string, vals ...string) {
if len(vals) == 0 {
return
@@ -120,9 +123,17 @@
md[k] = append(md[k], vals...)
}
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+ k = strings.ToLower(k)
+ delete(md, k)
+}
+
// Join joins any number of mds into a single MD.
-// The order of values for each key is determined by the order in which
-// the mds containing those values are presented to Join.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
func Join(mds ...MD) MD {
out := MD{}
for _, md := range mds {
@@ -149,8 +160,8 @@
}
// AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the
-// documentation of Pairs for a description of kv.
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
@@ -163,20 +174,34 @@
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
}
-// FromIncomingContext returns the incoming metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
- md, ok = ctx.Value(mdIncomingKey{}).(MD)
- return
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+ md, ok := ctx.Value(mdIncomingKey{}).(MD)
+ if !ok {
+ return nil, false
+ }
+ out := MD{}
+ for k, v := range md {
+ // We need to manually convert all keys to lower case, because MD is a
+ // map, and there's no guarantee that the MD attached to the context is
+ // created using our helper functions.
+ key := strings.ToLower(k)
+ out[key] = v
+ }
+ return out, true
}
-// FromOutgoingContextRaw returns the un-merged, intermediary contents
-// of rawMD. Remember to perform strings.ToLower on the keys. The returned
-// MD should not be modified. Writing to it may cause races. Modification
-// should be made to copies of the returned MD.
+// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
-// This is intended for gRPC-internal use ONLY.
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+//
+// This is intended for gRPC-internal use ONLY. Users should use
+// FromOutgoingContext instead.
func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
@@ -186,21 +211,34 @@
return raw.md, raw.added, true
}
-// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
func FromOutgoingContext(ctx context.Context) (MD, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, false
}
- mds := make([]MD, 0, len(raw.added)+1)
- mds = append(mds, raw.md)
- for _, vv := range raw.added {
- mds = append(mds, Pairs(vv...))
+ out := MD{}
+ for k, v := range raw.md {
+ // We need to manually convert all keys to lower case, because MD is a
+ // map, and there's no guarantee that the MD attached to the context is
+ // created using our helper functions.
+ key := strings.ToLower(k)
+ out[key] = v
}
- return Join(mds...), ok
+ for _, added := range raw.added {
+ if len(added)%2 == 1 {
+ panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+ }
+
+ for i := 0; i < len(added); i += 2 {
+ key := strings.ToLower(added[i])
+ out[key] = append(out[key], added[i+1])
+ }
+ }
+ return out, ok
}
type rawMD struct {
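Reviewer note: the net effect of the FromIncomingContext/FromOutgoingContext changes is that callers always see lower-case keys, even for metadata attached or appended with mixed-case names. A short usage sketch against the public API, consistent with the version vendored here:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"X-Trace": {"abc"}})
	ctx = metadata.AppendToOutgoingContext(ctx, "X-Retry", "1")

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-trace"], md["x-retry"]) // [abc] [1] — both keys come back lower-cased
}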
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
deleted file mode 100644
index c9f79dc..0000000
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultPort = "443"
- defaultFreq = time.Minute * 30
-)
-
-var (
- errMissingAddr = errors.New("missing address")
- errWatcherClose = errors.New("watcher has been closed")
-
- lookupHost = net.DefaultResolver.LookupHost
- lookupSRV = net.DefaultResolver.LookupSRV
-)
-
-// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
-// create watchers that poll the DNS server using the frequency set by freq.
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
- return &dnsResolver{freq: freq}, nil
-}
-
-// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
-// watchers that poll the DNS server using the default frequency defined by defaultFreq.
-func NewDNSResolver() (Resolver, error) {
- return NewDNSResolverWithFreq(defaultFreq)
-}
-
-// dnsResolver handles name resolution for names following the DNS scheme
-type dnsResolver struct {
- // frequency of polling the DNS server that the watchers created by this resolver will use.
- freq time.Duration
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
- ip := net.ParseIP(addr)
- if ip == nil {
- return "", false
- }
- if ip.To4() != nil {
- return addr, true
- }
- return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
- if target == "" {
- return "", "", errMissingAddr
- }
-
- if ip := net.ParseIP(target); ip != nil {
- // target is an IPv4 or IPv6(without brackets) address
- return target, defaultPort, nil
- }
- if host, port, err := net.SplitHostPort(target); err == nil {
- // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
- if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
- host = "localhost"
- }
- if port == "" {
- // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
- port = defaultPort
- }
- return host, port, nil
- }
- if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
- // target doesn't have port
- return host, port, nil
- }
- return "", "", fmt.Errorf("invalid target address %v", target)
-}
-
-// Resolve creates a watcher that watches the name resolution of the target.
-func (r *dnsResolver) Resolve(target string) (Watcher, error) {
- host, port, err := parseTarget(target)
- if err != nil {
- return nil, err
- }
-
- if net.ParseIP(host) != nil {
- ipWatcher := &ipWatcher{
- updateChan: make(chan *Update, 1),
- }
- host, _ = formatIP(host)
- ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
- return ipWatcher, nil
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- return &dnsWatcher{
- r: r,
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- t: time.NewTimer(0),
- }, nil
-}
-
-// dnsWatcher watches for the name resolution update for a specific target
-type dnsWatcher struct {
- r *dnsResolver
- host string
- port string
- // The latest resolved address set
- curAddrs map[string]*Update
- ctx context.Context
- cancel context.CancelFunc
- t *time.Timer
-}
-
-// ipWatcher watches for the name resolution update for an IP address.
-type ipWatcher struct {
- updateChan chan *Update
-}
-
-// Next returns the address resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unnecessary. Therefore,
-// Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exists until watcher is closed.
-func (i *ipWatcher) Next() ([]*Update, error) {
- u, ok := <-i.updateChan
- if !ok {
- return nil, errWatcherClose
- }
- return []*Update{u}, nil
-}
-
-// Close closes the ipWatcher.
-func (i *ipWatcher) Close() {
- close(i.updateChan)
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
- // Backend indicates the server is a backend server.
- Backend AddressType = iota
- // GRPCLB indicates the server is a grpclb load balancer.
- GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
- // AddrType is the type of server (grpc load balancer or backend).
- AddrType AddressType
- // ServerName is the name of the grpc load balancer. Used for authentication.
- ServerName string
-}
-
-// compileUpdate compares the old resolved addresses and newly resolved addresses,
-// and generates an update list
-func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
- var res []*Update
- for a, u := range w.curAddrs {
- if _, ok := newAddrs[a]; !ok {
- u.Op = Delete
- res = append(res, u)
- }
- }
- for a, u := range newAddrs {
- if _, ok := w.curAddrs[a]; !ok {
- res = append(res, u)
- }
- }
- return res
-}
-
-func (w *dnsWatcher) lookupSRV() map[string]*Update {
- newAddrs := make(map[string]*Update)
- _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
- if err != nil {
- grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
- return nil
- }
- for _, s := range srvs {
- lbAddrs, err := lookupHost(w.ctx, s.Target)
- if err != nil {
- grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
- continue
- }
- for _, a := range lbAddrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + strconv.Itoa(int(s.Port))
- newAddrs[addr] = &Update{Addr: addr,
- Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
- }
- }
- return newAddrs
-}
-
-func (w *dnsWatcher) lookupHost() map[string]*Update {
- newAddrs := make(map[string]*Update)
- addrs, err := lookupHost(w.ctx, w.host)
- if err != nil {
- grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
- return nil
- }
- for _, a := range addrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + w.port
- newAddrs[addr] = &Update{Addr: addr}
- }
- return newAddrs
-}
-
-func (w *dnsWatcher) lookup() []*Update {
- newAddrs := w.lookupSRV()
- if newAddrs == nil {
- // If we failed to get any balancer addresses (either there is no SRV record
- // for the target, or resolution/parsing of the balancer target failed), fall
- // back to whatever A record info is available.
- newAddrs = w.lookupHost()
- }
- result := w.compileUpdate(newAddrs)
- w.curAddrs = newAddrs
- return result
-}
-
-// Next returns the resolved address update (delta) for the target. If there is no
-// change, it sleeps for 30 minutes and tries to resolve again after that.
-func (w *dnsWatcher) Next() ([]*Update, error) {
- for {
- select {
- case <-w.ctx.Done():
- return nil, errWatcherClose
- case <-w.t.C:
- }
- result := w.lookup()
- // Next lookup should happen after an interval defined by w.r.freq.
- w.t.Reset(w.r.freq)
- if len(result) > 0 {
- return result, nil
- }
- }
-}
-
-func (w *dnsWatcher) Close() {
- w.cancel()
-}
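For context before the diff moves on: the removed dnsWatcher re-sends only a delta between the previous and the current resolution results, not the full address set. Below is a minimal, self-contained sketch of that delta logic; the types and names are illustrative and are not the vendored code.

package main

import "fmt"

// op mirrors the Add/Delete operations of the removed naming package.
type op int

const (
	opAdd op = iota
	opDelete
)

type update struct {
	op   op
	addr string
}

// compileUpdate returns deletions for addresses that disappeared and additions
// for addresses that are new, matching the removed logic above.
func compileUpdate(prev, next map[string]bool) []update {
	var res []update
	for a := range prev {
		if !next[a] {
			res = append(res, update{op: opDelete, addr: a})
		}
	}
	for a := range next {
		if !prev[a] {
			res = append(res, update{op: opAdd, addr: a})
		}
	}
	return res
}

func main() {
	prev := map[string]bool{"10.0.0.1:443": true, "10.0.0.2:443": true}
	next := map[string]bool{"10.0.0.2:443": true, "10.0.0.3:443": true}
	fmt.Println(compileUpdate(prev, next)) // expect one delete and one add
}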
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
deleted file mode 100644
index f4c1c8b..0000000
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-//
-// This package is deprecated: please use package resolver instead.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
-type Operation uint8
-
-const (
- // Add indicates a new address is added.
- Add Operation = iota
- // Delete indicates an existing address is deleted.
- Delete
-)
-
-// Update defines a name resolution update. Note that an Update with both an empty
-// string Addr and nil Metadata is not valid.
-//
-// Deprecated: please use package resolver.
-type Update struct {
- // Op indicates the operation of the update.
- Op Operation
- // Addr is the updated address. It is empty string if there is no address update.
- Addr string
- // Metadata is the updated metadata. It is nil if there is no metadata update.
- // Metadata is not required for a custom naming implementation.
- Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
-type Resolver interface {
- // Resolve creates a Watcher for target.
- Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
-type Watcher interface {
- // Next blocks until an update or error happens. It may return one or more
- // updates. The first call should get the full set of the results. It should
- // return an error if and only if Watcher cannot recover.
- Next() ([]*Update, error)
- // Close closes the Watcher.
- Close()
-}
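The contract of the deleted naming package (the first Next call returns the full set, later calls return deltas, and an error means the Watcher cannot recover) is easiest to see from the consumer side. A hedged sketch of such a consumer loop, with a simplified interface inlined here because the package is removed by this change:

package watchexample

import "log"

// watcher mirrors the removed naming.Watcher contract, simplified to plain
// address strings for illustration.
type watcher interface {
	// Next blocks until an update or an unrecoverable error happens.
	Next() ([]string, error)
	// Close closes the watcher.
	Close()
}

// watch applies the full set delivered by the first Next call and the deltas
// delivered by every call after it, until the watcher fails or is closed.
func watch(w watcher, apply func([]string)) {
	defer w.Close()
	for {
		updates, err := w.Next()
		if err != nil {
			log.Printf("watcher stopped: %v", err)
			return
		}
		apply(updates)
	}
}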
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 0044789..e8367cb 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,80 +20,31 @@
import (
"context"
- "fmt"
"io"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
-// v2PickerWrapper wraps a balancer.Picker while providing the
-// balancer.V2Picker API. It requires a pickerWrapper to generate errors
-// including the latest connectionError. To be deleted when balancer.Picker is
-// updated to the balancer.V2Picker API.
-type v2PickerWrapper struct {
- picker balancer.Picker
- connErr *connErr
-}
-
-func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
- sc, done, err := v.picker.Pick(info.Ctx, info)
- if err != nil {
- if err == balancer.ErrTransientFailure {
- return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
- }
- return balancer.PickResult{}, err
- }
- return balancer.PickResult{SubConn: sc, Done: done}, nil
-}
-
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
// actions and unblocks when there's a picker update.
type pickerWrapper struct {
mu sync.Mutex
done bool
blockingCh chan struct{}
- picker balancer.V2Picker
-
- // The latest connection error. TODO: remove when V1 picker is deprecated;
- // balancer should be responsible for providing the error.
- *connErr
-}
-
-type connErr struct {
- mu sync.Mutex
- err error
-}
-
-func (c *connErr) updateConnectionError(err error) {
- c.mu.Lock()
- c.err = err
- c.mu.Unlock()
-}
-
-func (c *connErr) connectionError() error {
- c.mu.Lock()
- err := c.err
- c.mu.Unlock()
- return err
+ picker balancer.Picker
}
func newPickerWrapper() *pickerWrapper {
- return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+ return &pickerWrapper{blockingCh: make(chan struct{})}
}
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
- pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
-func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
pw.mu.Lock()
if pw.done {
pw.mu.Unlock()
@@ -154,8 +105,6 @@
var errStr string
if lastPickErr != nil {
errStr = "latest balancer error: " + lastPickErr.Error()
- } else if connectionErr := pw.connectionError(); connectionErr != nil {
- errStr = "latest connection error: " + connectionErr.Error()
} else {
errStr = ctx.Err().Error()
}
@@ -180,26 +129,25 @@
if err == balancer.ErrNoSubConnAvailable {
continue
}
- if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
- if !failfast {
- lastPickErr = err
- continue
- }
- return nil, nil, status.Error(codes.Unavailable, err.Error())
- }
if _, ok := status.FromError(err); ok {
+ // Status error: end the RPC unconditionally with this status.
return nil, nil, err
}
- // err is some other error.
- return nil, nil, status.Error(codes.Unknown, err.Error())
+ // For all other errors, wait-for-ready RPCs should block, and other
+ // RPCs should fail with Unavailable.
+ if !failfast {
+ lastPickErr = err
+ continue
+ }
+ return nil, nil, status.Error(codes.Unavailable, err.Error())
}
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
if !ok {
- grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
+ logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
continue
}
- if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+ if t := acw.getAddrConn().getReadyTransport(); t != nil {
if channelz.IsOn() {
return t, doneChannelzWrapper(acw, pickResult.Done), nil
}
@@ -210,7 +158,7 @@
// DoneInfo with default value works.
pickResult.Done(balancer.DoneInfo{})
}
- grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+ logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY.
// A valid picker always returns READY subConn. This means the state of ac
// just changed, and picker will be updated shortly.
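The rewritten pick loop above sorts picker errors into three buckets: balancer.ErrNoSubConnAvailable makes gRPC wait for the next picker update, a status error fails the RPC with exactly that status, and any other error blocks wait-for-ready RPCs while failing fail-fast RPCs with Unavailable. A hedged sketch of a custom balancer.Picker that exercises each branch; the type and field names are illustrative:

package pickexample

import (
	"errors"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// demoPicker shows the kinds of results the pick loop above handles.
type demoPicker struct {
	ready    balancer.SubConn // a READY SubConn, if any
	updating bool             // a new picker is about to be installed
	fatal    bool             // a terminal, RPC-ending condition
}

func (p *demoPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.ready != nil {
		// Normal case: hand back a READY SubConn.
		return balancer.PickResult{SubConn: p.ready}, nil
	}
	if p.updating {
		// Tell gRPC to block until this balancer installs a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	if p.fatal {
		// A status error ends the RPC unconditionally with this status.
		return balancer.PickResult{}, status.Error(codes.FailedPrecondition, "demo: refusing this RPC")
	}
	// Any other error: wait-for-ready RPCs keep blocking; fail-fast RPCs
	// fail with codes.Unavailable.
	return balancer.PickResult{}, errors.New("demo: no healthy backends")
}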
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index c43dac9..5168b62 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -20,13 +20,10 @@
import (
"errors"
+ "fmt"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/status"
)
// PickFirstBalancerName is the name of the pick_first balancer.
@@ -52,30 +49,16 @@
sc balancer.SubConn
}
-var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
-
-func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- if err != nil {
- b.ResolverError(err)
- return
- }
- b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
-}
-
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
-}
-
func (b *pickfirstBalancer) ResolverError(err error) {
switch b.state {
case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
// Set a failing picker if we don't have a good picker.
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
- )
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
}
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
+ if logger.V(2) {
+ logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
}
}
@@ -88,32 +71,32 @@
var err error
b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
if err != nil {
- if grpclog.V(2) {
- grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+ if logger.V(2) {
+ logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
b.state = connectivity.TransientFailure
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
- )
+ Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+ })
return balancer.ErrBadResolverState
}
b.state = connectivity.Idle
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
b.sc.Connect()
} else {
- b.sc.UpdateAddresses(cs.ResolverState.Addresses)
+ b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses)
b.sc.Connect()
}
return nil
}
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+ if logger.V(2) {
+ logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
}
if b.sc != sc {
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+ if logger.V(2) {
+ logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
}
return
}
@@ -124,20 +107,16 @@
}
switch s.ConnectivityState {
- case connectivity.Ready, connectivity.Idle:
+ case connectivity.Ready:
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
case connectivity.Connecting:
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
+ case connectivity.Idle:
+ b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
case connectivity.TransientFailure:
- err := balancer.ErrTransientFailure
- // TODO: this can be unconditional after the V1 API is removed, as
- // SubConnState will always contain a connection error.
- if s.ConnectionError != nil {
- err = balancer.TransientFailureError(s.ConnectionError)
- }
b.cc.UpdateState(balancer.State{
ConnectivityState: s.ConnectivityState,
- Picker: &picker{err: err},
+ Picker: &picker{err: s.ConnectionError},
})
}
}
@@ -145,6 +124,12 @@
func (b *pickfirstBalancer) Close() {
}
+func (b *pickfirstBalancer) ExitIdle() {
+ if b.sc != nil && b.state == connectivity.Idle {
+ b.sc.Connect()
+ }
+}
+
type picker struct {
result balancer.PickResult
err error
@@ -154,6 +139,17 @@
return p.result, p.err
}
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ sc balancer.SubConn
+}
+
+func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ i.sc.Connect()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
func init() {
balancer.Register(newPickfirstBuilder())
}
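The UpdateSubConnState switch above maps each connectivity state to a picker: READY hands out the SubConn, CONNECTING asks callers to wait, IDLE installs the new idlePicker to kick the connection, and TRANSIENT_FAILURE propagates the connection error. A hedged, self-contained sketch of the same mapping with illustrative type names (the picker and idlePicker types in the diff are unexported):

package pfexample

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

// staticPicker always returns a fixed result or error.
type staticPicker struct {
	result balancer.PickResult
	err    error
}

func (p *staticPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return p.result, p.err
}

// kickPicker mirrors idlePicker: connect the SubConn, then ask gRPC to wait.
type kickPicker struct{ sc balancer.SubConn }

func (p *kickPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	p.sc.Connect()
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}

// stateToBalancerState shows the state-to-picker mapping used above.
func stateToBalancerState(s balancer.SubConnState, sc balancer.SubConn) balancer.State {
	switch s.ConnectivityState {
	case connectivity.Ready:
		return balancer.State{ConnectivityState: s.ConnectivityState,
			Picker: &staticPicker{result: balancer.PickResult{SubConn: sc}}}
	case connectivity.Connecting:
		return balancer.State{ConnectivityState: s.ConnectivityState,
			Picker: &staticPicker{err: balancer.ErrNoSubConnAvailable}}
	case connectivity.Idle:
		return balancer.State{ConnectivityState: s.ConnectivityState,
			Picker: &kickPicker{sc: sc}}
	default: // TransientFailure (and Shutdown)
		return balancer.State{ConnectivityState: s.ConnectivityState,
			Picker: &staticPicker{err: s.ConnectionError}}
	}
}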
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 76acbbc..0a1e975 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -25,7 +25,10 @@
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
encodedData []byte
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
new file mode 100644
index 0000000..58c802f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu -o pipefail
+
+WORKDIR=$(mktemp -d)
+
+function finish {
+ rm -rf "$WORKDIR"
+}
+trap finish EXIT
+
+export GOBIN=${WORKDIR}/bin
+export PATH=${GOBIN}:${PATH}
+mkdir -p ${GOBIN}
+
+echo "remove existing generated files"
+# grpc_testingv3/testv3.pb.go is not re-generated because it was
+# intentionally generated by an older version of protoc-gen-go.
+rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go')
+
+echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
+(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
+
+echo "go install cmd/protoc-gen-go-grpc"
+(cd cmd/protoc-gen-go-grpc && go install .)
+
+echo "git clone https://github.com/grpc/grpc-proto"
+git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
+
+echo "git clone https://github.com/protocolbuffers/protobuf"
+git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
+
+# Pull in code.proto as a proto dependency
+mkdir -p ${WORKDIR}/googleapis/google/rpc
+echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
+curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
+
+mkdir -p ${WORKDIR}/out
+
+# Generates sources without the embed requirement
+LEGACY_SOURCES=(
+ ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
+ ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
+ ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
+ ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
+ profiling/proto/service.proto
+ reflection/grpc_reflection_v1alpha/reflection.proto
+)
+
+# Generates only the new gRPC Service symbols
+SOURCES=(
+ $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
+ ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
+ ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
+ ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
+ ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
+ ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
+ ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
+ ${WORKDIR}/grpc-proto/grpc/testing/*.proto
+ ${WORKDIR}/grpc-proto/grpc/core/*.proto
+)
+
+# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
+# import path of 'bar' in the generated code when 'foo.proto' is imported in
+# one of the sources.
+#
+# Note that the protos listed here are all for testing purposes. All protos to
+# be used externally should have a go_package option (and they don't need to be
+# listed here).
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
+Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
+Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
+
+for src in ${SOURCES[@]}; do
+ echo "protoc ${src}"
+ protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
+ -I"." \
+ -I${WORKDIR}/grpc-proto \
+ -I${WORKDIR}/googleapis \
+ -I${WORKDIR}/protobuf/src \
+ ${src}
+done
+
+for src in ${LEGACY_SOURCES[@]}; do
+ echo "protoc ${src}"
+ protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
+ -I"." \
+ -I${WORKDIR}/grpc-proto \
+ -I${WORKDIR}/googleapis \
+ -I${WORKDIR}/protobuf/src \
+ ${src}
+done
+
+# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
+# current location. Move it into the right place.
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+
+# grpc_testingv3/testv3.pb.go is not re-generated because it was
+# intentionally generated by an older version of protoc-gen-go.
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
+
+# grpc/service_config/service_config.proto does not have a go_package option.
+mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
+
+# grpc/testing does not have a go_package option.
+mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
+mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
+
+cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 0000000..e87ecd0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+type addressMapEntry struct {
+ addr Address
+ value interface{}
+}
+
+// AddressMap is a map of addresses to arbitrary values taking into account
+// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently. Must be created via
+// NewAddressMap; do not construct directly.
+type AddressMap struct {
+ m map[string]addressMapEntryList
+}
+
+type addressMapEntryList []*addressMapEntry
+
+// NewAddressMap creates a new AddressMap.
+func NewAddressMap() *AddressMap {
+ return &AddressMap{m: make(map[string]addressMapEntryList)}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList) find(addr Address) int {
+ if len(l) == 0 {
+ return -1
+ }
+ for i, entry := range l {
+ if entry.addr.ServerName == addr.ServerName &&
+ entry.addr.Attributes.Equal(addr.Attributes) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Get returns the value for the address in the map, if present.
+func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
+ entryList := a.m[addr.Addr]
+ if entry := entryList.find(addr); entry != -1 {
+ return entryList[entry].value, true
+ }
+ return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMap) Set(addr Address, value interface{}) {
+ entryList := a.m[addr.Addr]
+ if entry := entryList.find(addr); entry != -1 {
+ a.m[addr.Addr][entry].value = value
+ return
+ }
+ a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMap) Delete(addr Address) {
+ entryList := a.m[addr.Addr]
+ entry := entryList.find(addr)
+ if entry == -1 {
+ return
+ }
+ if len(entryList) == 1 {
+ entryList = nil
+ } else {
+ copy(entryList[entry:], entryList[entry+1:])
+ entryList = entryList[:len(entryList)-1]
+ }
+ a.m[addr.Addr] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMap) Len() int {
+ ret := 0
+ for _, entryList := range a.m {
+ ret += len(entryList)
+ }
+ return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMap) Keys() []Address {
+ ret := make([]Address, 0, a.Len())
+ for _, entryList := range a.m {
+ for _, entry := range entryList {
+ ret = append(ret, entry.addr)
+ }
+ }
+ return ret
+}
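The new AddressMap keys on Addr but treats ServerName and Attributes as part of an entry's identity, while BalancerAttributes, Type, and Metadata are ignored. A short usage sketch under that reading of the code:

package mapexample

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func demo() {
	m := resolver.NewAddressMap()

	a1 := resolver.Address{Addr: "10.0.0.1:443", ServerName: "foo"}
	a2 := resolver.Address{Addr: "10.0.0.1:443", ServerName: "bar"} // same Addr, different identity

	m.Set(a1, "value-for-foo")
	m.Set(a2, "value-for-bar")

	if v, ok := m.Get(a1); ok {
		fmt.Println(v) // value-for-foo
	}
	fmt.Println(m.Len()) // 2: ServerName distinguishes the two entries

	m.Delete(a1)
	fmt.Println(len(m.Keys())) // 1
}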
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index fe14b2f..e28b680 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -23,6 +23,7 @@
import (
"context"
"net"
+ "net/url"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
@@ -85,12 +86,19 @@
Backend AddressType = iota
// GRPCLB indicates the address is for a grpclb load balancer.
//
- // Deprecated: use Attributes in Address instead.
+ // Deprecated: to select the GRPCLB load balancing policy, use a service
+ // config with a corresponding loadBalancingConfig. To supply balancer
+ // addresses to the GRPCLB load balancing policy, set State.Attributes
+ // using balancer/grpclb/state.Set.
GRPCLB
)
// Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
@@ -109,9 +117,14 @@
ServerName string
// Attributes contains arbitrary data about this address intended for
- // consumption by the load balancing policy.
+ // consumption by the SubConn.
Attributes *attributes.Attributes
+ // BalancerAttributes contains arbitrary data about this address intended
+ // for consumption by the LB policy. These attributes do not affect SubConn
+ // creation, connection establishment, handshaking, etc.
+ BalancerAttributes *attributes.Attributes
+
// Type is the type of this address.
//
// Deprecated: use Attributes instead.
@@ -124,6 +137,15 @@
Metadata interface{}
}
+// Equal returns whether a and o are identical. Metadata is compared directly,
+// not with any recursive introspection.
+func (a *Address) Equal(o Address) bool {
+ return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+ a.Attributes.Equal(o.Attributes) &&
+ a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+ a.Type == o.Type && a.Metadata == o.Metadata
+}
+
// BuildOptions includes additional information for the builder to create
// the resolver.
type BuildOptions struct {
@@ -174,7 +196,7 @@
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
- UpdateState(State)
+ UpdateState(State) error
// ReportError notifies the ClientConn that the Resolver encountered an
// error. The ClientConn will notify the load balancer and begin calling
// ResolveNow on the Resolver with exponential backoff.
@@ -197,25 +219,36 @@
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// It is parsed from the target string that gets passed into Dial or DialContext by the user. And
-// grpc passes it to the resolver and the balancer.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user; gRPC then passes it to the resolver and the balancer.
//
-// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
-// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
-// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
//
-// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
-// be the full target string. e.g. "foo.bar" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
+// Examples:
//
-// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
-// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
-// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
+// - "dns://some_authority/foo.bar"
+// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+// - "foo.bar"
+// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
+// - "unknown_scheme://authority/endpoint"
+// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
- Scheme string
+ // Deprecated: use URL.Scheme instead.
+ Scheme string
+ // Deprecated: use URL.Host instead.
Authority string
- Endpoint string
+ // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
+ // the former is empty.
+ Endpoint string
+ // URL contains the parsed dial target with an optional default scheme added
+ // to it if the original dial target contained no scheme or contained an
+ // unregistered scheme. Any query params specified in the original dial
+ // target can be accessed from here.
+ URL url.URL
}
// Builder creates a resolver that will be used to watch name resolution updates.
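The Target examples above map directly onto the new URL field. A hedged illustration using net/url only to show the correspondence (the actual parsing happens inside gRPC, and the example target is hypothetical):

package targetexample

import (
	"fmt"
	"net/url"
)

func demo() {
	// Per the documentation above, "dns://some_authority/foo.bar" becomes
	// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"};
	// the new URL field carries the same pieces.
	u, err := url.Parse("dns://some_authority/foo.bar")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme) // "dns"            -> deprecated Scheme / URL.Scheme
	fmt.Println(u.Host)   // "some_authority" -> deprecated Authority / URL.Host
	fmt.Println(u.Path)   // "/foo.bar"       -> Endpoint is the path without the leading slash
}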
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index edfda86..2c47cd5 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -22,7 +22,6 @@
"fmt"
"strings"
"sync"
- "time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
@@ -41,8 +40,7 @@
done *grpcsync.Event
curState resolver.State
- pollingMu sync.Mutex
- polling chan struct{}
+ incomingMu sync.Mutex // Synchronizes all the incoming calls.
}
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
@@ -93,104 +91,71 @@
ccr.resolverMu.Unlock()
}
-// poll begins or ends asynchronous polling of the resolver based on whether
-// err is ErrBadResolverState.
-func (ccr *ccResolverWrapper) poll(err error) {
- ccr.pollingMu.Lock()
- defer ccr.pollingMu.Unlock()
- if err != balancer.ErrBadResolverState {
- // stop polling
- if ccr.polling != nil {
- close(ccr.polling)
- ccr.polling = nil
- }
- return
- }
- if ccr.polling != nil {
- // already polling
- return
- }
- p := make(chan struct{})
- ccr.polling = p
- go func() {
- for i := 0; ; i++ {
- ccr.resolveNow(resolver.ResolveNowOptions{})
- t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
- select {
- case <-p:
- t.Stop()
- return
- case <-ccr.done.Done():
- // Resolver has been closed.
- t.Stop()
- return
- case <-t.C:
- select {
- case <-p:
- return
- default:
- }
- // Timer expired; re-resolve.
- }
- }
- }()
-}
-
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+ ccr.incomingMu.Lock()
+ defer ccr.incomingMu.Unlock()
if ccr.done.HasFired() {
- return
+ return nil
}
- channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
+ channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(s)
}
ccr.curState = s
- ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+ if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+ return balancer.ErrBadResolverState
+ }
+ return nil
}
func (ccr *ccResolverWrapper) ReportError(err error) {
+ ccr.incomingMu.Lock()
+ defer ccr.incomingMu.Unlock()
if ccr.done.HasFired() {
return
}
- channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
- ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
+ channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+ ccr.cc.updateResolverState(resolver.State{}, err)
}
// NewAddress is called by the resolver implementation to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+ ccr.incomingMu.Lock()
+ defer ccr.incomingMu.Unlock()
if ccr.done.HasFired() {
return
}
- channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
+ channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
}
ccr.curState.Addresses = addrs
- ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+ ccr.cc.updateResolverState(ccr.curState, nil)
}
// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+ ccr.incomingMu.Lock()
+ defer ccr.incomingMu.Unlock()
if ccr.done.HasFired() {
return
}
- channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
+ channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
if ccr.cc.dopts.disableServiceConfig {
- channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
+ channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
return
}
scpr := parseServiceConfig(sc)
if scpr.Err != nil {
- channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
- ccr.poll(balancer.ErrBadResolverState)
+ channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
return
}
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
}
ccr.curState.ServiceConfig = scpr
- ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+ ccr.cc.updateResolverState(ccr.curState, nil)
}
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
@@ -215,8 +180,8 @@
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
- channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
+ channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
- Severity: channelz.CtINFO,
+ Severity: channelz.CtInfo,
})
}
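With the polling loop removed from the wrapper, UpdateState now surfaces balancer.ErrBadResolverState to the resolver implementation, which becomes responsible for deciding when to re-resolve. A hedged sketch of a resolver reacting to that return value; the helper name and the one-second backoff are illustrative, not part of this change:

package resolverexample

import (
	"time"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// pushState sends addresses to gRPC and schedules a re-resolution if the
// ClientConn rejected the update.
func pushState(cc resolver.ClientConn, addrs []resolver.Address, reresolve func()) {
	if err := cc.UpdateState(resolver.State{Addresses: addrs}); err == balancer.ErrBadResolverState {
		// The wrapper no longer polls on our behalf; back off and try again.
		time.AfterFunc(time.Second, reresolve)
	}
}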
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index cf9dbe7..5d407b0 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -27,7 +27,6 @@
"io"
"io/ioutil"
"math"
- "net/url"
"strings"
"sync"
"time"
@@ -155,7 +154,6 @@
type callInfo struct {
compressorType string
failFast bool
- stream ClientStream
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
@@ -180,7 +178,7 @@
// after is called after the call has completed. after cannot return an
// error, so any failures should be reported via output parameters.
- after(*callInfo)
+ after(*callInfo, *csAttempt)
}
// EmptyCallOption does not alter the Call configuration.
@@ -188,8 +186,8 @@
// by interceptors.
type EmptyCallOption struct{}
-func (EmptyCallOption) before(*callInfo) error { return nil }
-func (EmptyCallOption) after(*callInfo) {}
+func (EmptyCallOption) before(*callInfo) error { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
// Header returns a CallOptions that retrieves the header metadata
// for a unary RPC.
@@ -199,16 +197,18 @@
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.HeaderAddr, _ = c.stream.Header()
- }
+func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+ *o.HeaderAddr, _ = attempt.s.Header()
}
// Trailer returns a CallOptions that retrieves the trailer metadata
@@ -219,16 +219,18 @@
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.TrailerAddr = c.stream.Trailer()
- }
+func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+ *o.TrailerAddr = attempt.s.Trailer()
}
// Peer returns a CallOption that retrieves peer information for a unary RPC.
@@ -239,22 +241,25 @@
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type PeerCallOption struct {
PeerAddr *peer.Peer
}
func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo) {
- if c.stream != nil {
- if x, ok := peer.FromContext(c.stream.Context()); ok {
- *o.PeerAddr = *x
- }
+func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+ if x, ok := peer.FromContext(attempt.s.Context()); ok {
+ *o.PeerAddr = *x
}
}
// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false, the RPC will fail
+// connections or unreachable servers. If waitForReady is false and the
+// connection is in the TRANSIENT_FAILURE state, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will
// retry the call if it fails due to a transient error. gRPC will not retry if
@@ -276,7 +281,11 @@
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type FailFastCallOption struct {
FailFast bool
}
@@ -285,7 +294,7 @@
c.failFast = o.FailFast
return nil
}
-func (o FailFastCallOption) after(c *callInfo) {}
+func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive.
@@ -295,7 +304,11 @@
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can receive.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type MaxRecvMsgSizeCallOption struct {
MaxRecvMsgSize int
}
@@ -304,7 +317,7 @@
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send.
@@ -314,7 +327,11 @@
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can send.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type MaxSendMsgSizeCallOption struct {
MaxSendMsgSize int
}
@@ -323,7 +340,7 @@
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@@ -333,7 +350,11 @@
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type PerRPCCredsCallOption struct {
Creds credentials.PerRPCCredentials
}
@@ -342,19 +363,26 @@
c.creds = o.Creds
return nil
}
-func (o PerRPCCredsCallOption) after(c *callInfo) {}
+func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func UseCompressor(name string) CallOption {
return CompressorCallOption{CompressorType: name}
}
// CompressorCallOption is a CallOption that indicates the compressor to use.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type CompressorCallOption struct {
CompressorType string
}
@@ -363,7 +391,7 @@
c.compressorType = o.CompressorType
return nil
}
-func (o CompressorCallOption) after(c *callInfo) {}
+func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@@ -387,7 +415,11 @@
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ContentSubtypeCallOption struct {
ContentSubtype string
}
@@ -396,11 +428,12 @@
c.contentSubtype = o.ContentSubtype
return nil
}
-func (o ContentSubtypeCallOption) after(c *callInfo) {}
+func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
-// ForceCodec returns a CallOption that will set the given Codec to be
-// used for all request and response messages for a call. The result of calling
-// String() will be used as the content-subtype in a case-insensitive manner.
+// ForceCodec returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
//
// See Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
@@ -411,7 +444,10 @@
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func ForceCodec(codec encoding.Codec) CallOption {
return ForceCodecCallOption{Codec: codec}
}
@@ -419,7 +455,10 @@
// ForceCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ForceCodecCallOption struct {
Codec encoding.Codec
}
@@ -428,7 +467,7 @@
c.codec = o.Codec
return nil
}
-func (o ForceCodecCallOption) after(c *callInfo) {}
+func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@@ -441,7 +480,10 @@
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type CustomCodecCallOption struct {
Codec Codec
}
@@ -450,19 +492,26 @@
c.codec = o.Codec
return nil
}
-func (o CustomCodecCallOption) after(c *callInfo) {}
+func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func MaxRetryRPCBufferSize(bytes int) CallOption {
return MaxRetryRPCBufferSizeCallOption{bytes}
}
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type MaxRetryRPCBufferSizeCallOption struct {
MaxRetryRPCBufferSize int
}
@@ -471,7 +520,7 @@
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -663,13 +712,11 @@
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
- } else {
- size = len(d)
- }
- if size > maxReceiveMessageSize {
- // TODO: Revisit the error code. Currently keep it consistent with java
- // implementation.
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ if size > maxReceiveMessageSize {
+ // TODO: Revisit the error code. Currently keep it consistent with java
+ // implementation.
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ }
}
return d, nil
}
@@ -780,33 +827,45 @@
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
- if err == nil || err == io.EOF {
+ switch err {
+ case nil, io.EOF:
return err
- }
- if err == io.ErrUnexpectedEOF {
+ case context.DeadlineExceeded:
+ return status.Error(codes.DeadlineExceeded, err.Error())
+ case context.Canceled:
+ return status.Error(codes.Canceled, err.Error())
+ case io.ErrUnexpectedEOF:
return status.Error(codes.Internal, err.Error())
}
- if _, ok := status.FromError(err); ok {
- return err
- }
+
switch e := err.(type) {
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
- default:
- switch err {
- case context.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
+ case *transport.NewStreamError:
+ return toRPCErr(e.Err)
}
+
+ if _, ok := status.FromError(err); ok {
+ return err
+ }
+
return status.Error(codes.Unknown, err.Error())
}
// setCallInfoCodec should only be called after CallOptions have been applied.
func setCallInfoCodec(c *callInfo) error {
if c.codec != nil {
- // codec was already set by a CallOption; use it.
+ // codec was already set by a CallOption; use it, but set the content
+ // subtype if it is not set.
+ if c.contentSubtype == "" {
+ // c.codec is a baseCodec to hide the difference between grpc.Codec and
+ // encoding.Codec (Name vs. String method name). We only support
+ // setting content subtype from encoding.Codec to avoid a behavior
+ // change with the deprecated version.
+ if ec, ok := c.codec.(encoding.Codec); ok {
+ c.contentSubtype = strings.ToLower(ec.Name())
+ }
+ }
return nil
}
@@ -824,40 +883,6 @@
return nil
}
-// parseDialTarget returns the network and address to pass to dialer
-func parseDialTarget(target string) (net string, addr string) {
- net = "tcp"
-
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
-
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- net = n
- addr = target[m1+1:]
- return net, addr
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr = t.Path
- if scheme == "unix" {
- net = scheme
- if addr == "" {
- addr = t.Host
- }
- return net, addr
- }
- }
-
- return net, target
-}
-
// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
@@ -873,10 +898,9 @@
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
-// support package version is 6.
+// support package version is 7.
//
-// Older versions are kept for compatibility. They may be removed if
-// compatibility cannot be maintained.
+// Older versions are kept for compatibility.
//
// These constants should not be referenced from any other code.
const (
@@ -884,6 +908,7 @@
SupportPackageIsVersion4 = true
SupportPackageIsVersion5 = true
SupportPackageIsVersion6 = true
+ SupportPackageIsVersion7 = true
)
const grpcUA = "grpc-go/" + Version
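Several of the options documented above are consumed through the before/after hooks on callInfo. A short usage sketch of a few of them on a unary call; the method name and the interface{} request/reply are placeholders:

package calloptexample

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

func call(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) {
	var hdr, trl metadata.MD
	var p peer.Peer
	err := cc.Invoke(ctx, "/example.Service/Method", req, reply,
		grpc.WaitForReady(true),    // block instead of failing fast on TRANSIENT_FAILURE
		grpc.UseCompressor("gzip"), // requires the gzip encoding package to be linked in
		grpc.Header(&hdr),          // populated after the RPC completes
		grpc.Trailer(&trl),         // populated after the RPC completes
		grpc.Peer(&p),              // identity of the remote peer
	)
	if err != nil {
		log.Printf("RPC failed: %v", err)
		return
	}
	log.Printf("header: %v, trailer: %v, peer: %v", hdr, trl, p.Addr)
}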
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index edfcdca..eadf9e0 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -40,8 +40,10 @@
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
@@ -55,9 +57,26 @@
const (
defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
defaultServerMaxSendMessageSize = math.MaxInt32
+
+ // Server transports are tracked in a map which is keyed on listener
+ // address. For regular gRPC traffic, connections are accepted in Serve()
+ // through a call to Accept(), and we use the actual listener address as key
+ // when we add it to the map. But for connections received through
+ // ServeHTTP(), we do not have a listener and hence use this dummy value.
+ listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
)
+func init() {
+ internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+ return srv.opts.creds
+ }
+ internal.DrainServerTransports = func(srv *Server, addr string) {
+ srv.drainServerTransports(addr)
+ }
+}
+
var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
@@ -78,27 +97,37 @@
Metadata interface{}
}
-// service consists of the information of the server serving this service and
-// the methods in this service.
-type service struct {
- server interface{} // the server for service methods
- md map[string]*MethodDesc
- sd map[string]*StreamDesc
- mdata interface{}
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+ // Contains the implementation for the methods in this service.
+ serviceImpl interface{}
+ methods map[string]*MethodDesc
+ streams map[string]*StreamDesc
+ mdata interface{}
+}
+
+type serverWorkerData struct {
+ st transport.ServerTransport
+ wg *sync.WaitGroup
+ stream *transport.Stream
}
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts serverOptions
- mu sync.Mutex // guards following
- lis map[net.Listener]bool
- conns map[transport.ServerTransport]bool
- serve bool
- drain bool
- cv *sync.Cond // signaled when connections close for GracefulStop
- m map[string]*service // service name -> service info
- events trace.EventLog
+ mu sync.Mutex // guards following
+ lis map[net.Listener]bool
+ // conns contains all active server transports. It is a map keyed on a
+ // listener address with the value being the set of active transports
+ // belonging to that listener.
+ conns map[string]map[transport.ServerTransport]bool
+ serve bool
+ drain bool
+ cv *sync.Cond // signaled when connections close for GracefulStop
+ services map[string]*serviceInfo // service name -> service info
+ events trace.EventLog
quit *grpcsync.Event
done *grpcsync.Event
@@ -107,6 +136,8 @@
channelzID int64 // channelz unique identification number
czData *channelzData
+
+ serverWorkerChannels []chan *serverWorkerData
}
type serverOptions struct {
@@ -133,6 +164,7 @@
connectionTimeout time.Duration
maxHeaderListSize *uint32
headerTableSize *uint32
+ numServerWorkers uint32
}
var defaultServerOptions = serverOptions{
@@ -151,7 +183,10 @@
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type EmptyServerOption struct{}
func (EmptyServerOption) apply(*serverOptions) {}
@@ -213,7 +248,7 @@
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
if kp.Time > 0 && kp.Time < time.Second {
- grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+ logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
kp.Time = time.Second
}
@@ -232,19 +267,55 @@
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codec
})
}
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codec
+ })
+}
+
// RPCCompressor returns a ServerOption that sets a compressor for outbound
// messages. For backward compatibility, all outbound messages will be sent
// using this compressor, regardless of incoming message compression. By
// default, server messages will be sent using the same compressor with which
// request messages were sent.
//
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
func RPCCompressor(cp Compressor) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.cp = cp
@@ -255,7 +326,8 @@
// messages. It has higher priority than decompressors registered via
// encoding.RegisterCompressor.
//
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
func RPCDecompressor(dc Decompressor) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.dc = dc
@@ -265,7 +337,7 @@
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit.
//
-// Deprecated: use MaxRecvMsgSize instead.
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
@@ -335,7 +407,7 @@
}
// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
-// for stream RPCs. The first interceptor will be the outer most,
+// for streaming RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
// All stream interceptors added by this method will be chained.
func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
@@ -346,6 +418,11 @@
// InTapHandle returns a ServerOption that sets the tap handle for all the server
// transport to be created. Only one can be installed.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func InTapHandle(h tap.ServerInHandle) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
if o.inTapHandle != nil {
@@ -385,7 +462,10 @@
// new connections. If this is not set, the default is 120 seconds. A zero or
// negative value will result in an immediate timeout.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func ConnectionTimeout(d time.Duration) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.connectionTimeout = d
@@ -403,13 +483,79 @@
// HeaderTableSize returns a ServerOption that sets the size of dynamic
// header table for stream.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func HeaderTableSize(s uint32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.headerTableSize = &s
})
}
+// NumStreamWorkers returns a ServerOption that sets the number of worker
+// goroutines that should be used to process incoming streams. Setting this to
+// zero (default) will disable workers and spawn a new goroutine for each
+// stream.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NumStreamWorkers(numServerWorkers uint32) ServerOption {
+ // TODO: If/when this API gets stabilized (i.e. stream workers become the
+ // only way streams are processed), change the behavior of the zero value to
+ // a sane default. Preliminary experiments suggest that a value equal to the
+ // number of CPUs available is most performant; requires thorough testing.
+ return newFuncServerOption(func(o *serverOptions) {
+ o.numServerWorkers = numServerWorkers
+ })
+}
+
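// A minimal sketch of enabling the experimental stream-worker pool added
// here. The worker count (runtime.NumCPU()) is only an illustrative choice
// echoing the TODO below, not a recommended default; zero keeps the default
// one-goroutine-per-stream behavior.
package main

import (
	"runtime"

	"google.golang.org/grpc"
)

func main() {
	s := grpc.NewServer(
		grpc.NumStreamWorkers(uint32(runtime.NumCPU())), // 0 (default) disables the pool
	)
	_ = s
}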
+// serverWorkerResetThreshold defines how often the stack must be reset. Every
+// N requests, by spawning a new goroutine in its place, a worker can reset its
+// stack so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+// serverWorker blocks on a *transport.Stream channel forever and waits for
+// data to be fed by serveStreams. This allows different requests to be
+// processed by the same goroutine, removing the need for expensive stack
+// re-allocations (see the runtime.morestack problem [1]).
+//
+// [1] https://github.com/golang/go/issues/18138
+func (s *Server) serverWorker(ch chan *serverWorkerData) {
+ // To make sure all server workers don't reset at the same time, choose a
+ // random number of iterations before resetting.
+ threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
+ for completed := 0; completed < threshold; completed++ {
+ data, ok := <-ch
+ if !ok {
+ return
+ }
+ s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
+ data.wg.Done()
+ }
+ go s.serverWorker(ch)
+}
+
+// initServerWorkers creates worker goroutines and channels to process incoming
+// connections to reduce the time spent overall on runtime.morestack.
+func (s *Server) initServerWorkers() {
+ s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
+ for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+ s.serverWorkerChannels[i] = make(chan *serverWorkerData)
+ go s.serverWorker(s.serverWorkerChannels[i])
+ }
+}
+
+func (s *Server) stopServerWorkers() {
+ for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+ close(s.serverWorkerChannels[i])
+ }
+}
+
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@@ -418,13 +564,13 @@
o.apply(&opts)
}
s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[transport.ServerTransport]bool),
- m: make(map[string]*service),
- quit: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- czData: new(channelzData),
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ conns: make(map[string]map[transport.ServerTransport]bool),
+ services: make(map[string]*serviceInfo),
+ quit: grpcsync.NewEvent(),
+ done: grpcsync.NewEvent(),
+ czData: new(channelzData),
}
chainUnaryServerInterceptors(s)
chainStreamServerInterceptors(s)
@@ -434,6 +580,10 @@
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
}
+ if s.opts.numServerWorkers > 0 {
+ s.initServerWorkers()
+ }
+
if channelz.IsOn() {
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
}
@@ -456,14 +606,29 @@
}
}
+// ServiceRegistrar wraps a single method that supports service registration. It
+// enables users to pass concrete types other than grpc.Server to the service
+// registration methods exported by the IDL generated code.
+type ServiceRegistrar interface {
+ // RegisterService registers a service and its implementation to the
+ // concrete type implementing this interface. It may not be called
+ // once the server has started serving.
+ // desc describes the service and its methods and handlers. impl is the
+ // service implementation which is passed to the method handlers.
+ RegisterService(desc *ServiceDesc, impl interface{})
+}
+
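// A sketch of how a concrete type other than *grpc.Server can satisfy
// ServiceRegistrar, so IDL-generated Register<Service>Server helpers can be
// pointed at it. lazyRegistrar and the commented-out generated call are
// hypothetical; only the ServiceRegistrar interface comes from this change.
package main

import "google.golang.org/grpc"

// lazyRegistrar records registrations and replays them on a real server later.
type lazyRegistrar struct {
	pending []func(grpc.ServiceRegistrar)
}

func (r *lazyRegistrar) RegisterService(desc *grpc.ServiceDesc, impl interface{}) {
	r.pending = append(r.pending, func(s grpc.ServiceRegistrar) {
		s.RegisterService(desc, impl)
	})
}

func (r *lazyRegistrar) replay(s grpc.ServiceRegistrar) {
	for _, reg := range r.pending {
		reg(s)
	}
}

func main() {
	lr := &lazyRegistrar{}
	// pb.RegisterGreeterServer(lr, &greeterImpl{}) // a generated helper would accept lr here
	srv := grpc.NewServer()
	lr.replay(srv) // *grpc.Server also implements ServiceRegistrar
	_ = srv
}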
// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
-// invoking Serve.
+// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
+// ensure it implements sd.HandlerType.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
- ht := reflect.TypeOf(sd.HandlerType).Elem()
- st := reflect.TypeOf(ss)
- if !st.Implements(ht) {
- grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ if ss != nil {
+ ht := reflect.TypeOf(sd.HandlerType).Elem()
+ st := reflect.TypeOf(ss)
+ if !st.Implements(ht) {
+ logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ }
}
s.register(sd, ss)
}
@@ -473,26 +638,26 @@
defer s.mu.Unlock()
s.printf("RegisterService(%q)", sd.ServiceName)
if s.serve {
- grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+ logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
}
- if _, ok := s.m[sd.ServiceName]; ok {
- grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+ if _, ok := s.services[sd.ServiceName]; ok {
+ logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
}
- srv := &service{
- server: ss,
- md: make(map[string]*MethodDesc),
- sd: make(map[string]*StreamDesc),
- mdata: sd.Metadata,
+ info := &serviceInfo{
+ serviceImpl: ss,
+ methods: make(map[string]*MethodDesc),
+ streams: make(map[string]*StreamDesc),
+ mdata: sd.Metadata,
}
for i := range sd.Methods {
d := &sd.Methods[i]
- srv.md[d.MethodName] = d
+ info.methods[d.MethodName] = d
}
for i := range sd.Streams {
d := &sd.Streams[i]
- srv.sd[d.StreamName] = d
+ info.streams[d.StreamName] = d
}
- s.m[sd.ServiceName] = srv
+ s.services[sd.ServiceName] = info
}
// MethodInfo contains the information of an RPC including its method name and type.
@@ -516,16 +681,16 @@
// Service names include the package names, in the form of <package>.<service>.
func (s *Server) GetServiceInfo() map[string]ServiceInfo {
ret := make(map[string]ServiceInfo)
- for n, srv := range s.m {
- methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
- for m := range srv.md {
+ for n, srv := range s.services {
+ methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+ for m := range srv.methods {
methods = append(methods, MethodInfo{
Name: m,
IsClientStream: false,
IsServerStream: false,
})
}
- for m, d := range srv.sd {
+ for m, d := range srv.streams {
methods = append(methods, MethodInfo{
Name: m,
IsClientStream: d.ClientStreams,
@@ -545,13 +710,6 @@
// the server being stopped.
var ErrServerStopped = errors.New("grpc: the server has been stopped")
-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if s.opts.creds == nil {
- return rawConn, nil, nil
- }
- return s.opts.creds.ServerHandshake(rawConn)
-}
-
type listenSocket struct {
net.Listener
channelzID int64
@@ -660,7 +818,7 @@
// s.conns before this conn can be added.
s.serveWG.Add(1)
go func() {
- s.handleRawConn(rawConn)
+ s.handleRawConn(lis.Addr().String(), rawConn)
s.serveWG.Done()
}()
}
@@ -668,49 +826,45 @@
// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(rawConn net.Conn) {
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
if s.quit.HasFired() {
rawConn.Close()
return
}
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
- conn, authInfo, err := s.useTransportAuthenticator(rawConn)
- if err != nil {
- // ErrConnDispatched means that the connection was dispatched away from
- // gRPC; those connections should be left open.
- if err != credentials.ErrConnDispatched {
- s.mu.Lock()
- s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
- s.mu.Unlock()
- channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
- rawConn.Close()
- }
- rawConn.SetDeadline(time.Time{})
- return
- }
// Finish handshaking (HTTP2)
- st := s.newHTTP2Transport(conn, authInfo)
+ st := s.newHTTP2Transport(rawConn)
+ rawConn.SetDeadline(time.Time{})
if st == nil {
return
}
- rawConn.SetDeadline(time.Time{})
- if !s.addConn(st) {
+ if !s.addConn(lisAddr, st) {
return
}
go func() {
s.serveStreams(st)
- s.removeConn(st)
+ s.removeConn(lisAddr, st)
}()
}
+func (s *Server) drainServerTransports(addr string) {
+ s.mu.Lock()
+ conns := s.conns[addr]
+ for st := range conns {
+ st.Drain()
+ }
+ s.mu.Unlock()
+}
+
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
config := &transport.ServerConfig{
MaxStreams: s.opts.maxConcurrentStreams,
- AuthInfo: authInfo,
+ ConnectionTimeout: s.opts.connectionTimeout,
+ Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
@@ -723,13 +877,20 @@
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
}
- st, err := transport.NewServerTransport("http2", c, config)
+ st, err := transport.NewServerTransport(c, config)
if err != nil {
s.mu.Lock()
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
s.mu.Unlock()
- c.Close()
- channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
+ // ErrConnDispatched means that the connection was dispatched away from
+ // gRPC; those connections should be left open.
+ if err != credentials.ErrConnDispatched {
+ // Don't log on ErrConnDispatched and io.EOF to prevent log spam.
+ if err != io.EOF {
+ channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
+ }
+ c.Close()
+ }
return nil
}
@@ -739,12 +900,27 @@
func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close()
var wg sync.WaitGroup
+
+ var roundRobinCounter uint32
st.HandleStreams(func(stream *transport.Stream) {
wg.Add(1)
- go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
- }()
+ if s.opts.numServerWorkers > 0 {
+ data := &serverWorkerData{st: st, wg: &wg, stream: stream}
+ select {
+ case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
+ default:
+ // If all stream workers are busy, fall back to the default code path.
+ go func() {
+ s.handleStream(st, stream, s.traceInfo(st, stream))
+ wg.Done()
+ }()
+ }
+ } else {
+ go func() {
+ defer wg.Done()
+ s.handleStream(st, stream, s.traceInfo(st, stream))
+ }()
+ }
}, func(ctx context.Context, method string) context.Context {
if !EnableTracing {
return ctx
@@ -779,18 +955,22 @@
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
-// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
-// and subject to change.
+// available through grpc-go's HTTP/2 server.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- if !s.addConn(st) {
+ if !s.addConn(listenerAddressForServeHTTP, st) {
return
}
- defer s.removeConn(st)
+ defer s.removeConn(listenerAddressForServeHTTP, st)
s.serveStreams(st)
}
@@ -818,7 +998,7 @@
return trInfo
}
-func (s *Server) addConn(st transport.ServerTransport) bool {
+func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
@@ -830,15 +1010,28 @@
// immediately.
st.Drain()
}
- s.conns[st] = true
+
+ if s.conns[addr] == nil {
+ // Create a map entry if this is the first connection on this listener.
+ s.conns[addr] = make(map[transport.ServerTransport]bool)
+ }
+ s.conns[addr][st] = true
return true
}
-func (s *Server) removeConn(st transport.ServerTransport) {
+func (s *Server) removeConn(addr string, st transport.ServerTransport) {
s.mu.Lock()
defer s.mu.Unlock()
- if s.conns != nil {
- delete(s.conns, st)
+
+ conns := s.conns[addr]
+ if conns != nil {
+ delete(conns, st)
+ if len(conns) == 0 {
+ // If the last connection for this address is being removed, also
+ // remove the map entry corresponding to the address. This is used
+ // in GracefulStop() when waiting for all connections to be closed.
+ delete(s.conns, addr)
+ }
s.cv.Broadcast()
}
}
@@ -868,12 +1061,12 @@
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
if err != nil {
- channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err)
+ channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
return err
}
compData, err := compress(data, cp, comp)
if err != nil {
- channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err)
+ channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
return err
}
hdr, payload := msgHeader(data, compData)
@@ -903,26 +1096,33 @@
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
- chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
- return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
- }
+ chainedInt = chainUnaryInterceptors(interceptors)
}
s.opts.unaryInt = chainedInt
}
-// getChainUnaryHandler recursively generate the chained UnaryHandler
-func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
- if curr == len(interceptors)-1 {
- return finalHandler
- }
-
- return func(ctx context.Context, req interface{}) (interface{}, error) {
- return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
+ return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+ // the struct ensures the variables are allocated together, rather than separately, since we
+ // know they should be garbage collected together. This saves 1 allocation and decreases
+ // time/call by about 10% on the microbenchmark.
+ var state struct {
+ i int
+ next UnaryHandler
+ }
+ state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
+ if state.i == len(interceptors)-1 {
+ return interceptors[state.i](ctx, req, info, handler)
+ }
+ state.i++
+ return interceptors[state.i-1](ctx, req, info, state.next)
+ }
+ return state.next(ctx, req)
}
}
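// A small sketch of the chaining behavior implemented above: with
// ChainUnaryInterceptor(a, b), a is the outermost wrapper and b runs closest
// to the handler. The interceptors here are illustrative only.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

func named(name string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		log.Printf("-> %s %s", name, info.FullMethod)
		resp, err := handler(ctx, req)
		log.Printf("<- %s", name)
		return resp, err
	}
}

func main() {
	// For each unary RPC the logs nest as: -> outer, -> inner, <- inner, <- outer.
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(named("outer"), named("inner")))
	_ = s
}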
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
sh := s.opts.statsHandler
if sh != nil || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
@@ -932,7 +1132,9 @@
if sh != nil {
beginTime := time.Now()
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: beginTime,
+ IsClientStream: false,
+ IsServerStream: false,
}
sh.HandleRPC(stream.Context(), statsBegin)
}
@@ -1045,10 +1247,8 @@
}
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
if err != nil {
- if st, ok := status.FromError(err); ok {
- if e := t.WriteStatus(stream, st); e != nil {
- channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
- }
+ if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
}
return err
}
@@ -1063,7 +1263,7 @@
sh.HandleRPC(stream.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
- WireLength: payInfo.wireLength,
+ WireLength: payInfo.wireLength + headerLen,
Data: d,
Length: len(d),
})
@@ -1079,7 +1279,7 @@
return nil
}
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
- reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
+ reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
@@ -1092,7 +1292,7 @@
trInfo.tr.SetError()
}
if e := t.WriteStatus(stream, appStatus); e != nil {
- channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
if binlog != nil {
if h, _ := stream.Header(); h.Len() > 0 {
@@ -1121,7 +1321,7 @@
}
if sts, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, sts); e != nil {
- channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
switch st := err.(type) {
@@ -1186,26 +1386,33 @@
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
- chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
- return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
- }
+ chainedInt = chainStreamInterceptors(interceptors)
}
s.opts.streamInt = chainedInt
}
-// getChainStreamHandler recursively generate the chained StreamHandler
-func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
- if curr == len(interceptors)-1 {
- return finalHandler
- }
-
- return func(srv interface{}, ss ServerStream) error {
- return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
+ return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+ // the struct ensures the variables are allocated together, rather than separately, since we
+ // know they should be garbage collected together. This saves 1 allocation and decreases
+ // time/call by about 10% on the microbenchmark.
+ var state struct {
+ i int
+ next StreamHandler
+ }
+ state.next = func(srv interface{}, ss ServerStream) error {
+ if state.i == len(interceptors)-1 {
+ return interceptors[state.i](srv, ss, info, handler)
+ }
+ state.i++
+ return interceptors[state.i-1](srv, ss, info, state.next)
+ }
+ return state.next(srv, ss)
}
}
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
}
@@ -1214,7 +1421,9 @@
if sh != nil {
beginTime := time.Now()
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: beginTime,
+ IsClientStream: sd.ClientStreams,
+ IsServerStream: sd.ServerStreams,
}
sh.HandleRPC(stream.Context(), statsBegin)
}
@@ -1317,13 +1526,15 @@
}
}
+ ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
}
var appErr error
var server interface{}
- if srv != nil {
- server = srv.server
+ if info != nil {
+ server = info.serviceImpl
}
if s.opts.streamInt == nil {
appErr = sd.Handler(server, ss)
@@ -1384,12 +1595,12 @@
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
- if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
+ if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
- channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@@ -1399,13 +1610,13 @@
service := sm[:pos]
method := sm[pos+1:]
- srv, knownService := s.m[service]
+ srv, knownService := s.services[service]
if knownService {
- if md, ok := srv.md[method]; ok {
+ if md, ok := srv.methods[method]; ok {
s.processUnaryRPC(t, stream, srv, md, trInfo)
return
}
- if sd, ok := srv.sd[method]; ok {
+ if sd, ok := srv.streams[method]; ok {
s.processStreamingRPC(t, stream, srv, sd, trInfo)
return
}
@@ -1430,7 +1641,7 @@
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
- channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@@ -1443,7 +1654,10 @@
// NewContextWithServerTransportStream creates a new context from ctx and
// attaches stream to it.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
return context.WithValue(ctx, streamKey{}, stream)
}
@@ -1455,7 +1669,10 @@
//
// See also NewContextWithServerTransportStream.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ServerTransportStream interface {
Method() string
SetHeader(md metadata.MD) error
@@ -1467,7 +1684,10 @@
// ctx. Returns nil if the given context has no stream associated with it
// (which implies it is not an RPC invocation context).
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
return s
@@ -1495,7 +1715,7 @@
s.mu.Lock()
listeners := s.lis
s.lis = nil
- st := s.conns
+ conns := s.conns
s.conns = nil
// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
s.cv.Broadcast()
@@ -1504,8 +1724,13 @@
for lis := range listeners {
lis.Close()
}
- for c := range st {
- c.Close()
+ for _, cs := range conns {
+ for st := range cs {
+ st.Close()
+ }
+ }
+ if s.opts.numServerWorkers > 0 {
+ s.stopServerWorkers()
}
s.mu.Lock()
@@ -1539,8 +1764,10 @@
}
s.lis = nil
if !s.drain {
- for st := range s.conns {
- st.Drain()
+ for _, conns := range s.conns {
+ for st := range conns {
+ st.Drain()
+ }
}
s.drain = true
}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 5a80a57..22c4240 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -20,15 +20,16 @@
import (
"encoding/json"
+ "errors"
"fmt"
+ "reflect"
"strconv"
"strings"
"time"
- "google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
+ internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/serviceconfig"
)
@@ -40,29 +41,7 @@
// Deprecated: Users should not use this struct. Service config should be received
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig struct {
- // WaitForReady indicates whether RPCs sent to this method should wait until
- // the connection is ready by default (!failfast). The value specified via the
- // gRPC client API will override the value set here.
- WaitForReady *bool
- // Timeout is the default timeout for RPCs sent to this method. The actual
- // deadline used will be the minimum of the value specified here and the value
- // set by the application via the gRPC client API. If either one is not set,
- // then the other will be used. If neither is set, then the RPC has no deadline.
- Timeout *time.Duration
- // MaxReqSize is the maximum allowed payload size for an individual request in a
- // stream (client->server) in bytes. The size which is measured is the serialized
- // payload after per-message compression (but before stream compression) in bytes.
- // The actual value used is the minimum of the value specified here and the value set
- // by the application via the gRPC client API. If either one is not set, then the other
- // will be used. If neither is set, then the built-in default is used.
- MaxReqSize *int
- // MaxRespSize is the maximum allowed payload size for an individual response in a
- // stream (server->client) in bytes.
- MaxRespSize *int
- // RetryPolicy configures retry options for the method.
- retryPolicy *retryPolicy
-}
+type MethodConfig = internalserviceconfig.MethodConfig
type lbConfig struct {
name string
@@ -79,7 +58,7 @@
serviceconfig.Config
// LB is the load balancer the service provider recommends. The balancer
- // specified via grpc.WithBalancer will override this. This is deprecated;
+ // specified via grpc.WithBalancerName will override this. This is deprecated;
// lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
// will be used.
LB *string
@@ -126,34 +105,6 @@
ServiceName string
}
-// retryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryPolicy struct {
- // MaxAttempts is the maximum number of attempts, including the original RPC.
- //
- // This field is required and must be two or greater.
- maxAttempts int
-
- // Exponential backoff parameters. The initial retry attempt will occur at
- // random(0, initialBackoff). In general, the nth attempt will occur at
- // random(0,
- // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
- //
- // These fields are required and must be greater than zero.
- initialBackoff time.Duration
- maxBackoff time.Duration
- backoffMultiplier float64
-
- // The set of status codes which may be retried.
- //
- // Status codes are specified as strings, e.g., "UNAVAILABLE".
- //
- // This field is required and must be non-empty.
- // Note: a set is used to store this for easy lookup.
- retryableStatusCodes map[codes.Code]bool
-}
-
type jsonRetryPolicy struct {
MaxAttempts int
InitialBackoff string
@@ -224,19 +175,27 @@
}
type jsonName struct {
- Service *string
- Method *string
+ Service string
+ Method string
}
-func (j jsonName) generatePath() (string, bool) {
- if j.Service == nil {
- return "", false
+var (
+ errDuplicatedName = errors.New("duplicated name")
+ errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+func (j jsonName) generatePath() (string, error) {
+ if j.Service == "" {
+ if j.Method != "" {
+ return "", errEmptyServiceNonEmptyMethod
+ }
+ return "", nil
}
- res := "/" + *j.Service + "/"
- if j.Method != nil {
- res += *j.Method
+ res := "/" + j.Service + "/"
+ if j.Method != "" {
+ res += j.Method
}
- return res, true
+ return res, nil
}
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
@@ -249,12 +208,10 @@
RetryPolicy *jsonRetryPolicy
}
-type loadBalancingConfig map[string]json.RawMessage
-
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
- LoadBalancingConfig *[]loadBalancingConfig
+ LoadBalancingConfig *internalserviceconfig.BalancerConfig
MethodConfig *[]jsonMC
RetryThrottling *retryThrottlingPolicy
HealthCheckConfig *healthCheckConfig
@@ -270,7 +227,7 @@
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
@@ -280,53 +237,25 @@
healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
}
- if rsc.LoadBalancingConfig != nil {
- for i, lbcfg := range *rsc.LoadBalancingConfig {
- if len(lbcfg) != 1 {
- err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
- grpclog.Warningf(err.Error())
- return &serviceconfig.ParseResult{Err: err}
- }
- var name string
- var jsonCfg json.RawMessage
- for name, jsonCfg = range lbcfg {
- }
- builder := balancer.Get(name)
- if builder == nil {
- continue
- }
- sc.lbConfig = &lbConfig{name: name}
- if parser, ok := builder.(balancer.ConfigParser); ok {
- var err error
- sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
- if err != nil {
- return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
- }
- } else if string(jsonCfg) != "{}" {
- grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
- }
- break
- }
- if sc.lbConfig == nil {
- // We had a loadBalancingConfig field but did not encounter a
- // supported policy. The config is considered invalid in this
- // case.
- err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
- grpclog.Warningf(err.Error())
- return &serviceconfig.ParseResult{Err: err}
+ if c := rsc.LoadBalancingConfig; c != nil {
+ sc.lbConfig = &lbConfig{
+ name: c.Name,
+ cfg: c.Config,
}
}
if rsc.MethodConfig == nil {
return &serviceconfig.ParseResult{Config: &sc}
}
+
+ paths := map[string]struct{}{}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
continue
}
d, err := parseDuration(m.Timeout)
if err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
@@ -334,8 +263,8 @@
WaitForReady: m.WaitForReady,
Timeout: d,
}
- if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+ logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
@@ -352,10 +281,20 @@
mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
}
}
- for _, n := range *m.Name {
- if path, valid := n.generatePath(); valid {
- sc.Methods[path] = mc
+ for i, n := range *m.Name {
+ path, err := n.generatePath()
+ if err != nil {
+ logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
+ return &serviceconfig.ParseResult{Err: err}
}
+
+ if _, ok := paths[path]; ok {
+ err = errDuplicatedName
+ logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ paths[path] = struct{}{}
+ sc.Methods[path] = mc
}
}
@@ -370,7 +309,7 @@
return &serviceconfig.ParseResult{Config: &sc}
}
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
if jrp == nil {
return nil, nil
}
@@ -388,23 +327,23 @@
*mb <= 0 ||
jrp.BackoffMultiplier <= 0 ||
len(jrp.RetryableStatusCodes) == 0 {
- grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+ logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
return nil, nil
}
- rp := &retryPolicy{
- maxAttempts: jrp.MaxAttempts,
- initialBackoff: *ib,
- maxBackoff: *mb,
- backoffMultiplier: jrp.BackoffMultiplier,
- retryableStatusCodes: make(map[codes.Code]bool),
+ rp := &internalserviceconfig.RetryPolicy{
+ MaxAttempts: jrp.MaxAttempts,
+ InitialBackoff: *ib,
+ MaxBackoff: *mb,
+ BackoffMultiplier: jrp.BackoffMultiplier,
+ RetryableStatusCodes: make(map[codes.Code]bool),
}
- if rp.maxAttempts > 5 {
+ if rp.MaxAttempts > 5 {
// TODO(retry): Make the max maxAttempts configurable.
- rp.maxAttempts = 5
+ rp.MaxAttempts = 5
}
for _, code := range jrp.RetryableStatusCodes {
- rp.retryableStatusCodes[code] = true
+ rp.RetryableStatusCodes[code] = true
}
return rp, nil
}
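// An illustrative client-side service config exercising the parsing above:
// the "name" entries are validated by generatePath (an empty service with a
// non-empty method is rejected, and duplicated paths are rejected), and
// retryPolicy is converted by convertRetryPolicy. The dial target and
// service/method names are placeholders.
package main

import "google.golang.org/grpc"

const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [
      {"service": "echo.Echo", "method": "UnaryEcho"},
      {"service": "echo.Echo"}
    ],
    "waitForReady": true,
    "timeout": "1s",
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(exampleServiceConfig),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}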
@@ -432,3 +371,34 @@
func newInt(b int) *int {
return &b
}
+
+func init() {
+ internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because they may differ only in whitespace.
+//
+// If any of them is NOT *ServiceConfig, return false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+ aa, ok := a.(*ServiceConfig)
+ if !ok {
+ return false
+ }
+ bb, ok := b.(*ServiceConfig)
+ if !ok {
+ return false
+ }
+ aaRaw := aa.rawJSONString
+ aa.rawJSONString = ""
+ bbRaw := bb.rawJSONString
+ bb.rawJSONString = ""
+ defer func() {
+ aa.rawJSONString = aaRaw
+ bb.rawJSONString = bbRaw
+ }()
+ // Using reflect.DeepEqual instead of cmp.Equal because many balancer
+ // configs are unexported, and cmp.Equal cannot compare unexported fields
+ // from unexported structs.
+ return reflect.DeepEqual(aa, bb)
+}
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
index 187c304..73a2f92 100644
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -19,7 +19,10 @@
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
-// This package is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
package serviceconfig
// Config represents an opaque data structure holding a service config.
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index a7970c7..0285dcc 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -16,8 +16,6 @@
*
*/
-//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
-
// Package stats is for collecting and reporting various network and RPC stats.
// This package is for monitoring purposes only. All fields are read-only.
// All APIs are experimental.
@@ -38,15 +36,22 @@
IsClient() bool
}
-// Begin contains stats when an RPC begins.
+// Begin contains stats when an RPC attempt begins.
// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
Client bool
- // BeginTime is the time when the RPC begins.
+ // BeginTime is the time when the RPC attempt begins.
BeginTime time.Time
// FailFast indicates if this RPC is failfast.
FailFast bool
+ // IsClientStream indicates whether the RPC is a client streaming RPC.
+ IsClientStream bool
+ // IsServerStream indicates whether the RPC is a server streaming RPC.
+ IsServerStream bool
+ // IsTransparentRetryAttempt indicates whether this attempt was initiated
+ // due to transparently retrying a previous attempt.
+ IsTransparentRetryAttempt bool
}
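// A small sketch of a stats.Handler reading the Begin fields added above
// (IsClientStream, IsServerStream, IsTransparentRetryAttempt). The handler
// itself is illustrative; only the new fields come from this change.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type beginLogger struct{}

func (beginLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (beginLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if b, ok := s.(*stats.Begin); ok {
		log.Printf("attempt begin: clientStream=%t serverStream=%t transparentRetry=%t",
			b.IsClientStream, b.IsServerStream, b.IsTransparentRetryAttempt)
	}
}

func (beginLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (beginLogger) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	s := grpc.NewServer(grpc.StatsHandler(beginLogger{}))
	_ = s
}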
// IsClient indicates if the stats information is from client side.
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 01e182c..6d163b6 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -29,6 +29,7 @@
import (
"context"
+ "errors"
"fmt"
spb "google.golang.org/genproto/googleapis/rpc/status"
@@ -73,9 +74,16 @@
return status.FromProto(s)
}
-// FromError returns a Status representing err if it was produced from this
-// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
-// Status is returned with codes.Unknown and the original error message.
+// FromError returns a Status representation of err.
+//
+// - If err was produced by this package or implements the method `GRPCStatus()
+// *Status`, the appropriate Status is returned.
+//
+// - If err is nil, a Status is returned with codes.OK and no message.
+//
+// - Otherwise, err is an error not compatible with this package. In this
+// case, a Status is returned with codes.Unknown and err's Error() message,
+// and ok is false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true
@@ -110,18 +118,18 @@
return codes.Unknown
}
-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
+// FromContextError converts a context error or wrapped context error into a
+// Status. It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
func FromContextError(err error) *Status {
- switch err {
- case nil:
+ if err == nil {
return nil
- case context.DeadlineExceeded:
- return New(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return New(codes.Canceled, err.Error())
- default:
- return New(codes.Unknown, err.Error())
}
+ if errors.Is(err, context.DeadlineExceeded) {
+ return New(codes.DeadlineExceeded, err.Error())
+ }
+ if errors.Is(err, context.Canceled) {
+ return New(codes.Canceled, err.Error())
+ }
+ return New(codes.Unknown, err.Error())
}
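// A short sketch of the behavior change above: because errors.Is is now used
// instead of direct comparison, wrapped context errors map to the
// corresponding codes. Purely illustrative.
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	wrapped := fmt.Errorf("query failed: %w", context.DeadlineExceeded)
	st := status.FromContextError(wrapped)
	fmt.Println(st.Code()) // DeadlineExceeded (previously Unknown for wrapped errors)
}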
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 934ef68..625d47b 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -35,6 +35,9 @@
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/grpcutil"
+ iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@@ -49,14 +52,20 @@
// of the RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
-// StreamDesc represents a streaming RPC service's method specification.
+// StreamDesc represents a streaming RPC service's method specification. Used
+// on the server when registering services and on the client when initiating
+// new streams.
type StreamDesc struct {
- StreamName string
- Handler StreamHandler
+ // StreamName and Handler are only used when registering handlers on a
+ // server.
+ StreamName string // the name of the method excluding the service
+ Handler StreamHandler // the handler called for the method
- // At least one of these is true.
- ServerStreams bool
- ClientStreams bool
+ // ServerStreams and ClientStreams are used for registering handlers on a
+ // server as well as defining RPC behavior when passed to NewClientStream
+ // and ClientConn.NewStream. At least one must be true.
+ ServerStreams bool // indicates the server can perform streaming sends
+ ClientStreams bool // indicates the client can perform streaming sends
}
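// A minimal sketch of the client-side use of StreamDesc described above: only
// ServerStreams/ClientStreams matter when a StreamDesc is passed to
// ClientConn.NewStream. The dial target and method name are placeholders.
package main

import (
	"context"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	desc := &grpc.StreamDesc{ServerStreams: true} // a server-streaming call
	cs, err := conn.NewStream(context.Background(), desc, "/echo.Echo/ServerStreamingEcho")
	if err != nil {
		panic(err)
	}
	_ = cs // cs.SendMsg / cs.RecvMsg would follow in real code
}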
// Stream defines the common interface a client or server stream has to satisfy.
@@ -163,13 +172,48 @@
}
}()
}
- c := defaultCallInfo()
// Provide an opportunity for the first RPC to see the first service config
// provided by the resolver.
if err := cc.waitForResolvedAddrs(ctx); err != nil {
return nil, err
}
- mc := cc.GetMethodConfig(method)
+
+ var mc serviceconfig.MethodConfig
+ var onCommit func()
+ var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+ return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
+ }
+
+ rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
+ rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
+ if err != nil {
+ return nil, toRPCErr(err)
+ }
+
+ if rpcConfig != nil {
+ if rpcConfig.Context != nil {
+ ctx = rpcConfig.Context
+ }
+ mc = rpcConfig.MethodConfig
+ onCommit = rpcConfig.OnCommitted
+ if rpcConfig.Interceptor != nil {
+ rpcInfo.Context = nil
+ ns := newStream
+ newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+ cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
+ if err != nil {
+ return nil, toRPCErr(err)
+ }
+ return cs, nil
+ }
+ }
+ }
+
+ return newStream(ctx, func() {})
+}
+
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
+ c := defaultCallInfo()
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
@@ -206,6 +250,7 @@
Host: cc.authority,
Method: method,
ContentSubtype: c.contentSubtype,
+ DoneFunc: doneFunc,
}
// Set our outgoing compression according to the UseCompressor CallOption, if
@@ -229,33 +274,6 @@
if c.creds != nil {
callHdr.Creds = c.creds
}
- var trInfo *traceInfo
- if EnableTracing {
- trInfo = &traceInfo{
- tr: trace.New("grpc.Sent."+methodFamily(method), method),
- firstLine: firstLine{
- client: true,
- },
- }
- if deadline, ok := ctx.Deadline(); ok {
- trInfo.firstLine.deadline = time.Until(deadline)
- }
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- ctx = trace.NewContext(ctx, trInfo.tr)
- }
- ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
- sh := cc.dopts.copts.StatsHandler
- var beginTime time.Time
- if sh != nil {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
- beginTime = time.Now()
- begin := &stats.Begin{
- Client: true,
- BeginTime: beginTime,
- FailFast: c.failFast,
- }
- sh.HandleRPC(ctx, begin)
- }
cs := &clientStream{
callHdr: callHdr,
@@ -269,18 +287,15 @@
cp: cp,
comp: comp,
cancel: cancel,
- beginTime: beginTime,
firstAttempt: true,
+ onCommit: onCommit,
}
if !cc.dopts.disableRetry {
cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
}
cs.binlog = binarylog.GetMethodLogger(method)
- cs.callInfo.stream = cs
- // Only this initial attempt has stats/tracing.
- // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
- if err := cs.newAttemptLocked(sh, trInfo); err != nil {
+ if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
cs.finish(err)
return nil, err
}
@@ -328,8 +343,43 @@
// newAttemptLocked creates a new attempt with a transport.
// If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
+ ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+ method := cs.callHdr.Method
+ sh := cs.cc.dopts.copts.StatsHandler
+ var beginTime time.Time
+ if sh != nil {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+ beginTime = time.Now()
+ begin := &stats.Begin{
+ Client: true,
+ BeginTime: beginTime,
+ FailFast: cs.callInfo.failFast,
+ IsClientStream: cs.desc.ClientStreams,
+ IsServerStream: cs.desc.ServerStreams,
+ IsTransparentRetryAttempt: isTransparent,
+ }
+ sh.HandleRPC(ctx, begin)
+ }
+
+ var trInfo *traceInfo
+ if EnableTracing {
+ trInfo = &traceInfo{
+ tr: trace.New("grpc.Sent."+methodFamily(method), method),
+ firstLine: firstLine{
+ client: true,
+ },
+ }
+ if deadline, ok := ctx.Deadline(); ok {
+ trInfo.firstLine.deadline = time.Until(deadline)
+ }
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ ctx = trace.NewContext(ctx, trInfo.tr)
+ }
+
newAttempt := &csAttempt{
+ ctx: ctx,
+ beginTime: beginTime,
cs: cs,
dc: cs.cc.dopts.dc,
statsHandler: sh,
@@ -344,10 +394,18 @@
}
}()
- if err := cs.ctx.Err(); err != nil {
+ if err := ctx.Err(); err != nil {
return toRPCErr(err)
}
- t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+
+ if cs.cc.parsedTarget.Scheme == "xds" {
+ // Add extra metadata (metadata that will be added by transport) to context
+ // so the balancer can see them.
+ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+ "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+ ))
+ }
+ t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
return err
}
@@ -363,9 +421,11 @@
func (a *csAttempt) newStream() error {
cs := a.cs
cs.callHdr.PreviousAttempts = cs.numRetries
- s, err := a.t.NewStream(cs.ctx, cs.callHdr)
+ s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
- return toRPCErr(err)
+ // Return without converting to an RPC error so retry code can
+ // inspect.
+ return err
}
cs.attempt.s = s
cs.attempt.p = &parser{r: s}
@@ -386,8 +446,7 @@
cancel context.CancelFunc // cancels all attempts
- sentLast bool // sent an end stream
- beginTime time.Time
+ sentLast bool // sent an end stream
methodConfig *MethodConfig
@@ -418,7 +477,8 @@
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
+ committed bool // active attempt committed for retry?
+ onCommit func()
buffer []func(a *csAttempt) error // operations to replay on retry
bufferSize int // current size of buffer
}
@@ -426,6 +486,7 @@
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
+ ctx context.Context
cs *clientStream
t transport.ClientTransport
s *transport.Stream
@@ -444,9 +505,13 @@
trInfo *traceInfo
statsHandler stats.Handler
+ beginTime time.Time
}
func (cs *clientStream) commitAttemptLocked() {
+ if !cs.committed && cs.onCommit != nil {
+ cs.onCommit()
+ }
cs.committed = true
cs.buffer = nil
}
@@ -458,37 +523,57 @@
}
// shouldRetry returns nil if the RPC should be retried; otherwise it returns
-// the error that should be returned by the operation.
-func (cs *clientStream) shouldRetry(err error) error {
- if cs.attempt.s == nil && !cs.callInfo.failFast {
- // In the event of any error from NewStream (attempt.s == nil), we
- // never attempted to write anything to the wire, so we can retry
- // indefinitely for non-fail-fast RPCs.
- return nil
+// the error that should be returned by the operation. If the RPC should be
+// retried, the bool indicates whether it is being retried transparently.
+func (cs *clientStream) shouldRetry(err error) (bool, error) {
+ if cs.attempt.s == nil {
+ // Error from NewClientStream.
+ nse, ok := err.(*transport.NewStreamError)
+ if !ok {
+ // Unexpected, but assume no I/O was performed and the RPC is not
+ // fatal, so retry indefinitely.
+ return true, nil
+ }
+
+ // Unwrap and convert error.
+ err = toRPCErr(nse.Err)
+
+ // Never retry DoNotRetry errors, which indicate the RPC should not be
+ // retried due to max header list size violation, etc.
+ if nse.DoNotRetry {
+ return false, err
+ }
+
+ // In the event of a non-IO operation error from NewStream, we never
+ // attempted to write anything to the wire, so we can retry
+ // indefinitely.
+ if !nse.DoNotTransparentRetry {
+ return true, nil
+ }
}
if cs.finished || cs.committed {
// RPC is finished or committed; cannot retry.
- return err
+ return false, err
}
// Wait for the trailers.
+ unprocessed := false
if cs.attempt.s != nil {
<-cs.attempt.s.Done()
+ unprocessed = cs.attempt.s.Unprocessed()
}
- if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+ if cs.firstAttempt && unprocessed {
// First attempt, stream unprocessed: transparently retry.
- cs.firstAttempt = false
- return nil
+ return true, nil
}
- cs.firstAttempt = false
if cs.cc.dopts.disableRetry {
- return err
+ return false, err
}
pushback := 0
hasPushback := false
if cs.attempt.s != nil {
if !cs.attempt.s.TrailersOnly() {
- return err
+ return false, err
}
// TODO(retry): Move down if the spec changes to not check server pushback
@@ -497,15 +582,15 @@
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
- channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
+ channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
+ return false, err
}
hasPushback = true
} else if len(sps) > 1 {
- channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
+ channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
+ return false, err
}
}
@@ -516,18 +601,18 @@
code = status.Convert(err).Code()
}
- rp := cs.methodConfig.retryPolicy
- if rp == nil || !rp.retryableStatusCodes[code] {
- return err
+ rp := cs.methodConfig.RetryPolicy
+ if rp == nil || !rp.RetryableStatusCodes[code] {
+ return false, err
}
// Note: the ordering here is important; we count this as a failure
// only if the code matched a retryable code.
if cs.retryThrottler.throttle() {
- return err
+ return false, err
}
- if cs.numRetries+1 >= rp.maxAttempts {
- return err
+ if cs.numRetries+1 >= rp.MaxAttempts {
+ return false, err
}
var dur time.Duration
@@ -535,9 +620,9 @@
dur = time.Millisecond * time.Duration(pushback)
cs.numRetriesSincePushback = 0
} else {
- fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
- cur := float64(rp.initialBackoff) * fact
- if max := float64(rp.maxBackoff); cur > max {
+ fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+ cur := float64(rp.InitialBackoff) * fact
+ if max := float64(rp.MaxBackoff); cur > max {
cur = max
}
dur = time.Duration(grpcrand.Int63n(int64(cur)))
@@ -550,22 +635,24 @@
select {
case <-t.C:
cs.numRetries++
- return nil
+ return false, nil
case <-cs.ctx.Done():
t.Stop()
- return status.FromContextError(cs.ctx.Err()).Err()
+ return false, status.FromContextError(cs.ctx.Err()).Err()
}
}
// Returns nil if a retry was performed and succeeded; error otherwise.
func (cs *clientStream) retryLocked(lastErr error) error {
for {
- cs.attempt.finish(lastErr)
- if err := cs.shouldRetry(lastErr); err != nil {
+ cs.attempt.finish(toRPCErr(lastErr))
+ isTransparent, err := cs.shouldRetry(lastErr)
+ if err != nil {
cs.commitAttemptLocked()
return err
}
- if err := cs.newAttemptLocked(nil, nil); err != nil {
+ cs.firstAttempt = false
+ if err := cs.newAttemptLocked(isTransparent); err != nil {
return err
}
if lastErr = cs.replayBufferLocked(); lastErr == nil {
@@ -586,7 +673,11 @@
for {
if cs.committed {
cs.mu.Unlock()
- return op(cs.attempt)
+ // toRPCErr is used in case the error from the attempt comes from
+ // NewClientStream, which intentionally doesn't return a status
+ // error to allow for further inspection; all other errors should
+ // already be status errors.
+ return toRPCErr(op(cs.attempt))
}
a := cs.attempt
cs.mu.Unlock()
@@ -799,6 +890,15 @@
}
cs.finished = true
cs.commitAttemptLocked()
+ if cs.attempt != nil {
+ cs.attempt.finish(err)
+ // after functions all rely upon having a stream.
+ if cs.attempt.s != nil {
+ for _, o := range cs.opts {
+ o.after(cs.callInfo, cs.attempt)
+ }
+ }
+ }
cs.mu.Unlock()
// For binary logging. only log cancel in finish (could be caused by RPC ctx
// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
@@ -820,15 +920,6 @@
cs.cc.incrCallsSucceeded()
}
}
- if cs.attempt != nil {
- cs.attempt.finish(err)
- // after functions all rely upon having a stream.
- if cs.attempt.s != nil {
- for _, o := range cs.opts {
- o.after(cs.callInfo)
- }
- }
- }
cs.cancel()
}
@@ -851,7 +942,7 @@
return io.EOF
}
if a.statsHandler != nil {
- a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
+ a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
}
if channelz.IsOn() {
a.t.IncrMsgSent()
@@ -899,13 +990,13 @@
a.mu.Unlock()
}
if a.statsHandler != nil {
- a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
+ a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength,
+ WireLength: payInfo.wireLength + headerLen,
Length: len(payInfo.uncompressedBytes),
})
}
@@ -961,12 +1052,12 @@
if a.statsHandler != nil {
end := &stats.End{
Client: true,
- BeginTime: a.cs.beginTime,
+ BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
}
- a.statsHandler.HandleRPC(a.cs.ctx, end)
+ a.statsHandler.HandleRPC(a.ctx, end)
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1066,7 +1157,6 @@
t: t,
}
- as.callInfo.stream = as
s, err := as.t.NewStream(as.ctx, as.callHdr)
if err != nil {
err = toRPCErr(err)
@@ -1488,7 +1578,7 @@
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength,
+ WireLength: payInfo.wireLength + headerLen,
Length: len(payInfo.uncompressedBytes),
})
}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index 584360f..dbf34e6 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -17,7 +17,12 @@
*/
// Package tap defines the function handles which are executed on the transport
-// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
+// layer of gRPC-Go and related information.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
package tap
import (
@@ -32,16 +37,16 @@
// TODO: More to be added.
}
-// ServerInHandle defines the function which runs before a new stream is created
-// on the server side. If it returns a non-nil error, the stream will not be
-// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
-// The client will receive an RPC error "code = Unavailable, desc = stream
-// terminated by RST_STREAM with error code: REFUSED_STREAM".
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client. If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
//
// It's intended to be used in situations where you don't want to waste the
-// resources to accept the new stream (e.g. rate-limiting). And the content of
-// the error will be ignored and won't be sent back to the client. For other
-// general usages, please use interceptors.
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
//
// Note that it is executed in the per-connection I/O goroutine(s) instead of
// per-RPC goroutine. Therefore, users should NOT have any
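With the revised ServerInHandle contract above, a handle that returns a status error now has that status forwarded to the client instead of a bare REFUSED_STREAM. A minimal rate-limiting sketch (not part of this change; it assumes golang.org/x/time/rate for the limiter, and the limits shown are illustrative only):

package tapdemo

import (
	"context"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// limiter caps new streams at 100 per second with a burst of 200; the numbers
// are illustrative only.
var limiter = rate.NewLimiter(100, 200)

// rateLimitTap rejects excess streams before they are created. Returning a
// status error here means the client sees ResourceExhausted rather than the
// generic PermissionDenied fallback described above.
func rateLimitTap(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !limiter.Allow() {
		return nil, status.Errorf(codes.ResourceExhausted, "rejected %q: too many streams", info.FullMethodName)
	}
	return ctx, nil
}

func newServer() *grpc.Server {
	// InTapHandle installs the handle; it runs on the connection's I/O
	// goroutine, so it must not block.
	return grpc.NewServer(grpc.InTapHandle(rateLimitTap))
}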
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index ca5d55f..9d3fd73 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.29.1"
+const Version = "1.44.1-dev"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index e12024f..d923187 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -28,40 +28,35 @@
}
trap cleanup EXIT
-PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
+PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
+go version
if [[ "$1" = "-install" ]]; then
- # Check for module support
- if go help mod >& /dev/null; then
- # Install the pinned versions as defined in module tools.
- pushd ./test/tools
- go install \
- golang.org/x/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- popd
- else
- # Ye olde `go get` incantation.
- # Note: this gets the latest version of all tools (vs. the pinned versions
- # with Go modules).
- go get -u \
- golang.org/x/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- fi
+ # Install the pinned versions as defined in module tools.
+ pushd ./test/tools
+ go install \
+ golang.org/x/lint/golint \
+ golang.org/x/tools/cmd/goimports \
+ honnef.co/go/tools/cmd/staticcheck \
+ github.com/client9/misspell/cmd/misspell
+ popd
if [[ -z "${VET_SKIP_PROTO}" ]]; then
if [[ "${TRAVIS}" = "true" ]]; then
- PROTOBUF_VERSION=3.3.0
+ PROTOBUF_VERSION=3.14.0
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/travis
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
unzip ${PROTOC_FILENAME}
bin/protoc --version
popd
+ elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+ PROTOBUF_VERSION=3.14.0
+ PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+ pushd /home/runner/go
+ wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+ unzip ${PROTOC_FILENAME}
+ bin/protoc --version
+ popd
elif not which protoc > /dev/null; then
die "Please install protoc into your path"
fi
@@ -85,18 +80,14 @@
# thread safety.
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
+# - Do not call grpclog directly. Use grpclog.Component instead.
+git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
+
# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
-# - Check imports that are illegal in appengine (until Go 1.11).
-# TODO: Remove when we drop Go 1.10 support
-go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
-
-# - gofmt, goimports, golint (with exceptions for generated code), go vet.
-gofmt -s -d -l . 2>&1 | fail_on_output
-goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go"
-golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:"
-go vet -all ./...
+# - Ensure all xds proto imports are renamed to *pb or *grpc.
+git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
misspell -error .
@@ -107,12 +98,22 @@
(git status; git --no-pager diff; exit 1)
fi
-# - Check that our module is tidy.
-if go help mod >& /dev/null; then
- go mod tidy && \
- git status --porcelain 2>&1 | fail_on_output || \
+# - gofmt, goimports, golint (with exceptions for generated code), go vet,
+# go mod tidy.
+# Perform these checks on each module inside gRPC.
+for MOD_FILE in $(find . -name 'go.mod'); do
+ MOD_DIR=$(dirname ${MOD_FILE})
+ pushd ${MOD_DIR}
+ go vet -all ./... | fail_on_output
+ gofmt -s -d -l . 2>&1 | fail_on_output
+ goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+ golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:"
+
+ go mod tidy
+ git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
-fi
+ popd
+done
# - Collection of static analysis checks
#
@@ -123,18 +124,21 @@
# Error if anything other than deprecation warnings are printed.
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
# Only ignore the following deprecated types/fields/functions.
-not grep -Fv '.HandleResolvedAddrs
-.HandleSubConnStateChange
+not grep -Fv '.CredsBundle
.HeaderMap
+.Metadata is deprecated: use Attributes
.NewAddress
.NewServiceConfig
-.Metadata is deprecated: use Attributes
.Type is deprecated: use Attributes
-.UpdateBalancerState
+BuildVersion is deprecated
+balancer.ErrTransientFailure
balancer.Picker
+extDesc.Filename is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
grpc.CallCustomCodec
grpc.Code
grpc.Compressor
+grpc.CustomCodec
grpc.Decompressor
grpc.MaxMsgSize
grpc.MethodConfig
@@ -142,9 +146,7 @@
grpc.NewGZIPDecompressor
grpc.RPCCompressor
grpc.RPCDecompressor
-grpc.RoundRobin
grpc.ServiceConfig
-grpc.WithBalancer
grpc.WithBalancerName
grpc.WithCompressor
grpc.WithDecompressor
@@ -154,10 +156,56 @@
grpc.WithTimeout
http.CloseNotifier
info.SecurityVersion
-naming.Resolver
-naming.Update
-naming.Watcher
+proto is deprecated
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated
resolver.Backend
-resolver.GRPCLB' "${SC_OUT}"
+resolver.GRPCLB
+Target is deprecated: Use the Target field in the BuildOptions instead.
+xxx_messageInfo_
+' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+ # Number of files in this go package.
+ fileCount=$(go list -f '{{len .GoFiles}}' $1)
+ if [ ${fileCount} -eq 0 ]; then
+ return 0
+ fi
+ # Number of package errors generated by golint.
+ lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+ # golint complains about every file that's missing the package comment. If the
+ # number of files for this package is greater than the number of errors, there's
+ # at least one file with package comment, good. Otherwise, fail.
+ if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+ echo "Package $1 (with ${fileCount} files) is missing package comment"
+ return 1
+ fi
+}
+lint_package_comment() {
+ set +ex
+
+ count=0
+ for i in $(go list ./...); do
+ lint_package_comment_per_package "$i"
+ ((count += $?))
+ done
+
+ set -ex
+ return $count
+}
+lint_package_comment
echo SUCCESS
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index cde2c20..179d6e8 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -6,22 +6,23 @@
import (
"fmt"
- "strings"
"unicode/utf8"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/set"
+ "google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Unmarshal reads the given []byte into the given proto.Message.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
}
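A minimal usage sketch (not part of this diff) of the mutability requirement noted above, using the well-known durationpb type; a nil pointer would not satisfy it:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	durpb "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	src := []byte(`seconds: 3 nanos: 500000000`)

	// A non-nil pointer is mutable, so Unmarshal can reset and populate it.
	d := &durpb.Duration{}
	if err := prototext.Unmarshal(src, d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 500000000

	// A nil *durpb.Duration is not mutable and is not a valid target:
	// var bad *durpb.Duration
	// _ = prototext.Unmarshal(src, bad)
}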
@@ -50,9 +51,17 @@
}
}
-// Unmarshal reads the given []byte and populates the given proto.Message using options in
-// UnmarshalOptions object.
+// Unmarshal reads the given []byte and populates the given proto.Message
+// using options in the UnmarshalOptions object.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+ return o.unmarshal(b, m)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
proto.Reset(m)
if o.Resolver == nil {
@@ -100,7 +109,7 @@
return errors.New("no support for proto1 MessageSets")
}
- if messageDesc.FullName() == "google.protobuf.Any" {
+ if messageDesc.FullName() == genid.Any_message_fullname {
return d.unmarshalAny(m, checkDelims)
}
@@ -150,21 +159,11 @@
switch tok.NameKind() {
case text.IdentName:
name = pref.Name(tok.IdentName())
- fd = fieldDescs.ByName(name)
- if fd == nil {
- // The proto name of a group field is in all lowercase,
- // while the textproto field name is the group message name.
- gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name))))
- if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name {
- fd = gd
- }
- } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name {
- fd = nil // reset since field name is actually the message name
- }
+ fd = fieldDescs.ByTextName(string(name))
case text.TypeName:
// Handle extensions only. This code path is not for Any.
- xt, xtErr = d.findExtension(pref.FullName(tok.TypeName()))
+ xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName()))
case text.FieldNumber:
isFieldNumberName = true
@@ -261,15 +260,6 @@
return nil
}
-// findExtension returns protoreflect.ExtensionType from the Resolver if found.
-func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) {
- xt, err := d.opts.Resolver.FindExtensionByName(xtName)
- if err == nil {
- return xt, nil
- }
- return messageset.FindMessageSetExtension(d.opts.Resolver, xtName)
-}
-
// unmarshalSingular unmarshals a non-repeated field value specified by the
// given FieldDescriptor.
func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
@@ -339,10 +329,10 @@
case pref.StringKind:
if s, ok := tok.String(); ok {
- if utf8.ValidString(s) {
- return pref.ValueOfString(s), nil
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
}
- return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
+ return pref.ValueOfString(s), nil
}
case pref.BytesKind:
@@ -530,14 +520,13 @@
return d.unexpectedTokenError(tok)
}
- name := tok.IdentName()
- switch name {
- case "key":
+ switch name := pref.Name(tok.IdentName()); name {
+ case genid.MapEntry_Key_field_name:
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
if key.IsValid() {
- return d.newError(tok.Pos(), `map entry "key" cannot be repeated`)
+ return d.newError(tok.Pos(), "map entry %q cannot be repeated", name)
}
val, err := d.unmarshalScalar(fd.MapKey())
if err != nil {
@@ -545,14 +534,14 @@
}
key = val.MapKey()
- case "value":
+ case genid.MapEntry_Value_field_name:
if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
}
if pval.IsValid() {
- return d.newError(tok.Pos(), `map entry "value" cannot be repeated`)
+ return d.newError(tok.Pos(), "map entry %q cannot be repeated", name)
}
pval, err = unmarshalMapValue()
if err != nil {
@@ -589,13 +578,9 @@
func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
var typeURL string
var bValue []byte
-
- // hasFields tracks which valid fields have been seen in the loop below in
- // order to flag an error if there are duplicates or conflicts. It may
- // contain the strings "type_url", "value" and "expanded". The literal
- // "expanded" is used to indicate that the expanded form has been
- // encountered already.
- hasFields := map[string]bool{}
+ var seenTypeUrl bool
+ var seenValue bool
+ var isExpanded bool
if checkDelims {
tok, err := d.Read()
@@ -634,12 +619,12 @@
return d.syntaxError(tok.Pos(), "missing field separator :")
}
- switch tok.IdentName() {
- case "type_url":
- if hasFields["type_url"] {
- return d.newError(tok.Pos(), "duplicate Any type_url field")
+ switch name := pref.Name(tok.IdentName()); name {
+ case genid.Any_TypeUrl_field_name:
+ if seenTypeUrl {
+ return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname)
}
- if hasFields["expanded"] {
+ if isExpanded {
return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
}
tok, err := d.Read()
@@ -649,15 +634,15 @@
var ok bool
typeURL, ok = tok.String()
if !ok {
- return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString())
+ return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString())
}
- hasFields["type_url"] = true
+ seenTypeUrl = true
- case "value":
- if hasFields["value"] {
- return d.newError(tok.Pos(), "duplicate Any value field")
+ case genid.Any_Value_field_name:
+ if seenValue {
+ return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname)
}
- if hasFields["expanded"] {
+ if isExpanded {
return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
}
tok, err := d.Read()
@@ -666,22 +651,22 @@
}
s, ok := tok.String()
if !ok {
- return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString())
+ return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString())
}
bValue = []byte(s)
- hasFields["value"] = true
+ seenValue = true
default:
if !d.opts.DiscardUnknown {
- return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
+ return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname)
}
}
case text.TypeName:
- if hasFields["expanded"] {
+ if isExpanded {
return d.newError(tok.Pos(), "cannot have more than one type")
}
- if hasFields["type_url"] {
+ if seenTypeUrl {
return d.newError(tok.Pos(), "conflict with type_url field")
}
typeURL = tok.TypeName()
@@ -690,21 +675,21 @@
if err != nil {
return err
}
- hasFields["expanded"] = true
+ isExpanded = true
default:
if !d.opts.DiscardUnknown {
- return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
+ return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname)
}
}
}
fds := m.Descriptor().Fields()
if len(typeURL) > 0 {
- m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL))
+ m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL))
}
if len(bValue) > 0 {
- m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue))
+ m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue))
}
return nil
}
@@ -759,9 +744,6 @@
// Skip items. This will not validate whether skipped values are
// of the same type or not, same behavior as C++
// TextFormat::Parser::AllowUnknownField(true) version 3.8.0.
- if err := d.skipValue(); err != nil {
- return err
- }
}
}
}
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 83b65a6..8d5304d 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -6,7 +6,6 @@
import (
"fmt"
- "sort"
"strconv"
"unicode/utf8"
@@ -14,11 +13,13 @@
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/flags"
- "google.golang.org/protobuf/internal/mapsort"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
@@ -55,6 +56,15 @@
// Indent can only be composed of space or tab characters.
Indent string
+ // EmitASCII specifies whether to format strings and bytes as ASCII only
+ // as opposed to using UTF-8 encoding when possible.
+ EmitASCII bool
+
+ // allowInvalidUTF8 specifies whether to permit the encoding of strings
+ // with invalid UTF-8. This is unexported as it is intended to only
+ // be specified by the Format method.
+ allowInvalidUTF8 bool
+
// AllowPartial allows messages that have missing required fields to marshal
// without returning an error. If AllowPartial is false (the default),
// Marshal will return error if there are any missing required fields.
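A short sketch (not part of this diff) of the new EmitASCII option combined with Multiline, using a well-known wrapperspb message; the payload and the exact output are illustrative, since prototext output is not guaranteed to be stable:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	wpb "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wpb.String("héllo") // non-ASCII value to show the effect of EmitASCII

	out, err := prototext.MarshalOptions{
		Multiline: true,
		EmitASCII: true, // escape non-ASCII runes instead of writing raw UTF-8
	}.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // value: "h\u00e9llo" (approximate)
}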
@@ -81,6 +91,7 @@
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
}
+ o.allowInvalidUTF8 = true
o.AllowPartial = true
o.EmitUnknown = true
b, _ := o.Marshal(m)
@@ -91,7 +102,13 @@
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
- const outputASCII = false
+ return o.marshal(m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {
@@ -101,11 +118,17 @@
o.Resolver = protoregistry.GlobalTypes
}
- internalEnc, err := text.NewEncoder(o.Indent, delims, outputASCII)
+ internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII)
if err != nil {
return nil, err
}
+ // Treat nil message interface as an empty message,
+ // in which case there is nothing to output.
+ if m == nil {
+ return []byte{}, nil
+ }
+
enc := encoder{internalEnc, o}
err = enc.marshalMessage(m.ProtoReflect(), false)
if err != nil {
@@ -139,42 +162,22 @@
}
// Handle Any expansion.
- if messageDesc.FullName() == "google.protobuf.Any" {
+ if messageDesc.FullName() == genid.Any_message_fullname {
if e.marshalAny(m) {
return nil
}
// If unable to expand, continue on to marshal Any as a regular message.
}
- // Marshal known fields.
- fieldDescs := messageDesc.Fields()
- size := fieldDescs.Len()
- for i := 0; i < size; {
- fd := fieldDescs.Get(i)
- if od := fd.ContainingOneof(); od != nil {
- fd = m.WhichOneof(od)
- i += od.Fields().Len()
- } else {
- i++
+ // Marshal fields.
+ var err error
+ order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if err = e.marshalField(fd.TextName(), v, fd); err != nil {
+ return false
}
-
- if fd == nil || !m.Has(fd) {
- continue
- }
-
- name := fd.Name()
- // Use type name for group field name.
- if fd.Kind() == pref.GroupKind {
- name = fd.Message().Name()
- }
- val := m.Get(fd)
- if err := e.marshalField(string(name), val, fd); err != nil {
- return err
- }
- }
-
- // Marshal extensions.
- if err := e.marshalExtensions(m); err != nil {
+ return true
+ })
+ if err != nil {
return err
}
@@ -209,7 +212,7 @@
case pref.StringKind:
s := val.String()
- if !utf8.ValidString(s) {
+ if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
return errors.InvalidUTF8(string(fd.FullName()))
}
e.WriteString(s)
@@ -267,18 +270,18 @@
// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
var err error
- mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool {
+ order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool {
e.WriteName(name)
e.StartMessage()
defer e.EndMessage()
- e.WriteName("key")
+ e.WriteName(string(genid.MapEntry_Key_field_name))
err = e.marshalSingular(key.Value(), fd.MapKey())
if err != nil {
return false
}
- e.WriteName("value")
+ e.WriteName(string(genid.MapEntry_Value_field_name))
err = e.marshalSingular(val, fd.MapValue())
if err != nil {
return false
@@ -288,48 +291,6 @@
return err
}
-// marshalExtensions marshals extension fields.
-func (e encoder) marshalExtensions(m pref.Message) error {
- type entry struct {
- key string
- value pref.Value
- desc pref.FieldDescriptor
- }
-
- // Get a sorted list based on field key first.
- var entries []entry
- m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
- if !fd.IsExtension() {
- return true
- }
- // For MessageSet extensions, the name used is the parent message.
- name := fd.FullName()
- if messageset.IsMessageSetExtension(fd) {
- name = name.Parent()
- }
- entries = append(entries, entry{
- key: string(name),
- value: v,
- desc: fd,
- })
- return true
- })
- // Sort extensions lexicographically.
- sort.Slice(entries, func(i, j int) bool {
- return entries[i].key < entries[j].key
- })
-
- // Write out sorted list.
- for _, entry := range entries {
- // Extension field name is the proto field name enclosed in [].
- name := "[" + entry.key + "]"
- if err := e.marshalField(name, entry.value, entry.desc); err != nil {
- return err
- }
- }
- return nil
-}
-
// marshalUnknown parses the given []byte and marshals fields out.
// This function assumes proper encoding in the given []byte.
func (e encoder) marshalUnknown(b []byte) {
@@ -376,7 +337,7 @@
func (e encoder) marshalAny(any pref.Message) bool {
// Construct the embedded message.
fds := any.Descriptor().Fields()
- fdType := fds.ByNumber(fieldnum.Any_TypeUrl)
+ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
typeURL := any.Get(fdType).String()
mt, err := e.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
@@ -385,7 +346,7 @@
m := mt.New().Interface()
// Unmarshal bytes into embedded message.
- fdValue := fds.ByNumber(fieldnum.Any_Value)
+ fdValue := fds.ByNumber(genid.Any_Value_field_number)
value := any.Get(fdValue)
err = proto.UnmarshalOptions{
AllowPartial: true,
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index 4cb8d6d..360c633 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -42,6 +42,8 @@
name = "FileImports"
case pref.Descriptor:
name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
+ default:
+ name = reflect.ValueOf(vs).Elem().Type().Name()
}
start, end = name+"{", "}"
}
@@ -106,7 +108,7 @@
var descriptorAccessors = map[reflect.Type][]string{
reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
- reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "IsPacked", "IsExtension", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+ reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
index a904dd1..49c8676 100644
--- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go
+++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
@@ -26,6 +26,14 @@
return randSeed%2 == 1
}
+// Intn returns a deterministically random integer between 0 and n-1, inclusive.
+func Intn(n int) int {
+ if n <= 0 {
+ panic("must be positive")
+ }
+ return int(randSeed % uint64(n))
+}
+
// randSeed is a best-effort at an approximate hash of the Go binary.
var randSeed = binaryHash()
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
index b1eeea5..c1866f3 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
@@ -11,10 +11,9 @@
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
)
-// The MessageSet wire format is equivalent to a message defiend as follows,
+// The MessageSet wire format is equivalent to a message defined as follows,
// where each Item defines an extension field with a field number of 'type_id'
// and content of 'message'. MessageSet extensions must be non-repeated message
// fields.
@@ -48,33 +47,17 @@
return ok && xmd.IsMessageSet()
}
-// IsMessageSetExtension reports this field extends a MessageSet.
+// IsMessageSetExtension reports this field properly extends a MessageSet.
func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
- if fd.Name() != ExtensionName {
+ switch {
+ case fd.Name() != ExtensionName:
+ return false
+ case !IsMessageSet(fd.ContainingMessage()):
+ return false
+ case fd.FullName().Parent() != fd.Message().FullName():
return false
}
- if fd.FullName().Parent() != fd.Message().FullName() {
- return false
- }
- return IsMessageSet(fd.ContainingMessage())
-}
-
-// FindMessageSetExtension locates a MessageSet extension field by name.
-// In text and JSON formats, the extension name used is the message itself.
-// The extension field name is derived by appending ExtensionName.
-func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) {
- name := s.Append(ExtensionName)
- xt, err := r.FindExtensionByName(name)
- if err != nil {
- if err == preg.NotFound {
- return nil, err
- }
- return nil, errors.Wrap(err, "%q", name)
- }
- if !IsMessageSetExtension(xt.TypeDescriptor()) {
- return nil, preg.NotFound
- }
- return xt, nil
+ return true
}
// SizeField returns the size of a MessageSet item field containing an extension
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 16c02d7..38f1931 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -104,7 +104,7 @@
case strings.HasPrefix(s, "json="):
jsonName := s[len("json="):]
if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
- f.L1.JSONName.Init(jsonName)
+ f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
f.L1.HasPacked = true
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
index c4ba1c5..da289cc 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
@@ -32,7 +32,6 @@
encoderState
indent string
- newline string // set to "\n" if len(indent) > 0
delims [2]byte
outputASCII bool
}
@@ -61,7 +60,6 @@
return nil, errors.New("indent may only be composed of space and tab characters")
}
e.indent = indent
- e.newline = "\n"
}
switch delims {
case [2]byte{0, 0}:
@@ -126,7 +124,7 @@
// are used to represent both the proto string and bytes type.
r = rune(in[0])
fallthrough
- case r < ' ' || r == '"' || r == '\\':
+ case r < ' ' || r == '"' || r == '\\' || r == 0x7f:
out = append(out, '\\')
switch r {
case '"', '\\':
@@ -143,7 +141,7 @@
out = strconv.AppendUint(out, uint64(r), 16)
}
in = in[n:]
- case outputASCII && r >= utf8.RuneSelf:
+ case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f):
out = append(out, '\\')
if r <= math.MaxUint16 {
out = append(out, 'u')
@@ -168,7 +166,7 @@
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInString(s string) int {
for i := 0; i < len(s); i++ {
- if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf {
+ if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f {
return i
}
}
@@ -265,3 +263,8 @@
func (e *Encoder) Reset(es encoderState) {
e.encoderState = es
}
+
+// AppendString appends the escaped form of the input string to b.
+func AppendString(b []byte, s string) []byte {
+ return appendString(b, s, false)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
deleted file mode 100644
index 74c5fef..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Any.
-const (
- Any_TypeUrl = 1 // optional string
- Any_Value = 2 // optional bytes
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
deleted file mode 100644
index 9a6b5f2..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Api.
-const (
- Api_Name = 1 // optional string
- Api_Methods = 2 // repeated google.protobuf.Method
- Api_Options = 3 // repeated google.protobuf.Option
- Api_Version = 4 // optional string
- Api_SourceContext = 5 // optional google.protobuf.SourceContext
- Api_Mixins = 6 // repeated google.protobuf.Mixin
- Api_Syntax = 7 // optional google.protobuf.Syntax
-)
-
-// Field numbers for google.protobuf.Method.
-const (
- Method_Name = 1 // optional string
- Method_RequestTypeUrl = 2 // optional string
- Method_RequestStreaming = 3 // optional bool
- Method_ResponseTypeUrl = 4 // optional string
- Method_ResponseStreaming = 5 // optional bool
- Method_Options = 6 // repeated google.protobuf.Option
- Method_Syntax = 7 // optional google.protobuf.Syntax
-)
-
-// Field numbers for google.protobuf.Mixin.
-const (
- Mixin_Name = 1 // optional string
- Mixin_Root = 2 // optional string
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
deleted file mode 100644
index 1629515..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.FileDescriptorSet.
-const (
- FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto
-)
-
-// Field numbers for google.protobuf.FileDescriptorProto.
-const (
- FileDescriptorProto_Name = 1 // optional string
- FileDescriptorProto_Package = 2 // optional string
- FileDescriptorProto_Dependency = 3 // repeated string
- FileDescriptorProto_PublicDependency = 10 // repeated int32
- FileDescriptorProto_WeakDependency = 11 // repeated int32
- FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto
- FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto
- FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto
- FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto
- FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions
- FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo
- FileDescriptorProto_Syntax = 12 // optional string
-)
-
-// Field numbers for google.protobuf.DescriptorProto.
-const (
- DescriptorProto_Name = 1 // optional string
- DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto
- DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto
- DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto
- DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto
- DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange
- DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto
- DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions
- DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange
- DescriptorProto_ReservedName = 10 // repeated string
-)
-
-// Field numbers for google.protobuf.DescriptorProto.ExtensionRange.
-const (
- DescriptorProto_ExtensionRange_Start = 1 // optional int32
- DescriptorProto_ExtensionRange_End = 2 // optional int32
- DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions
-)
-
-// Field numbers for google.protobuf.DescriptorProto.ReservedRange.
-const (
- DescriptorProto_ReservedRange_Start = 1 // optional int32
- DescriptorProto_ReservedRange_End = 2 // optional int32
-)
-
-// Field numbers for google.protobuf.ExtensionRangeOptions.
-const (
- ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.FieldDescriptorProto.
-const (
- FieldDescriptorProto_Name = 1 // optional string
- FieldDescriptorProto_Number = 3 // optional int32
- FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label
- FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type
- FieldDescriptorProto_TypeName = 6 // optional string
- FieldDescriptorProto_Extendee = 2 // optional string
- FieldDescriptorProto_DefaultValue = 7 // optional string
- FieldDescriptorProto_OneofIndex = 9 // optional int32
- FieldDescriptorProto_JsonName = 10 // optional string
- FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions
-)
-
-// Field numbers for google.protobuf.OneofDescriptorProto.
-const (
- OneofDescriptorProto_Name = 1 // optional string
- OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions
-)
-
-// Field numbers for google.protobuf.EnumDescriptorProto.
-const (
- EnumDescriptorProto_Name = 1 // optional string
- EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto
- EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions
- EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange
- EnumDescriptorProto_ReservedName = 5 // repeated string
-)
-
-// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange.
-const (
- EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32
- EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32
-)
-
-// Field numbers for google.protobuf.EnumValueDescriptorProto.
-const (
- EnumValueDescriptorProto_Name = 1 // optional string
- EnumValueDescriptorProto_Number = 2 // optional int32
- EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions
-)
-
-// Field numbers for google.protobuf.ServiceDescriptorProto.
-const (
- ServiceDescriptorProto_Name = 1 // optional string
- ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto
- ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions
-)
-
-// Field numbers for google.protobuf.MethodDescriptorProto.
-const (
- MethodDescriptorProto_Name = 1 // optional string
- MethodDescriptorProto_InputType = 2 // optional string
- MethodDescriptorProto_OutputType = 3 // optional string
- MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions
- MethodDescriptorProto_ClientStreaming = 5 // optional bool
- MethodDescriptorProto_ServerStreaming = 6 // optional bool
-)
-
-// Field numbers for google.protobuf.FileOptions.
-const (
- FileOptions_JavaPackage = 1 // optional string
- FileOptions_JavaOuterClassname = 8 // optional string
- FileOptions_JavaMultipleFiles = 10 // optional bool
- FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool
- FileOptions_JavaStringCheckUtf8 = 27 // optional bool
- FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode
- FileOptions_GoPackage = 11 // optional string
- FileOptions_CcGenericServices = 16 // optional bool
- FileOptions_JavaGenericServices = 17 // optional bool
- FileOptions_PyGenericServices = 18 // optional bool
- FileOptions_PhpGenericServices = 42 // optional bool
- FileOptions_Deprecated = 23 // optional bool
- FileOptions_CcEnableArenas = 31 // optional bool
- FileOptions_ObjcClassPrefix = 36 // optional string
- FileOptions_CsharpNamespace = 37 // optional string
- FileOptions_SwiftPrefix = 39 // optional string
- FileOptions_PhpClassPrefix = 40 // optional string
- FileOptions_PhpNamespace = 41 // optional string
- FileOptions_PhpMetadataNamespace = 44 // optional string
- FileOptions_RubyPackage = 45 // optional string
- FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.MessageOptions.
-const (
- MessageOptions_MessageSetWireFormat = 1 // optional bool
- MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool
- MessageOptions_Deprecated = 3 // optional bool
- MessageOptions_MapEntry = 7 // optional bool
- MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.FieldOptions.
-const (
- FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType
- FieldOptions_Packed = 2 // optional bool
- FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType
- FieldOptions_Lazy = 5 // optional bool
- FieldOptions_Deprecated = 3 // optional bool
- FieldOptions_Weak = 10 // optional bool
- FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.OneofOptions.
-const (
- OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.EnumOptions.
-const (
- EnumOptions_AllowAlias = 2 // optional bool
- EnumOptions_Deprecated = 3 // optional bool
- EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.EnumValueOptions.
-const (
- EnumValueOptions_Deprecated = 1 // optional bool
- EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.ServiceOptions.
-const (
- ServiceOptions_Deprecated = 33 // optional bool
- ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.MethodOptions.
-const (
- MethodOptions_Deprecated = 33 // optional bool
- MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel
- MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
-)
-
-// Field numbers for google.protobuf.UninterpretedOption.
-const (
- UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart
- UninterpretedOption_IdentifierValue = 3 // optional string
- UninterpretedOption_PositiveIntValue = 4 // optional uint64
- UninterpretedOption_NegativeIntValue = 5 // optional int64
- UninterpretedOption_DoubleValue = 6 // optional double
- UninterpretedOption_StringValue = 7 // optional bytes
- UninterpretedOption_AggregateValue = 8 // optional string
-)
-
-// Field numbers for google.protobuf.UninterpretedOption.NamePart.
-const (
- UninterpretedOption_NamePart_NamePart = 1 // required string
- UninterpretedOption_NamePart_IsExtension = 2 // required bool
-)
-
-// Field numbers for google.protobuf.SourceCodeInfo.
-const (
- SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location
-)
-
-// Field numbers for google.protobuf.SourceCodeInfo.Location.
-const (
- SourceCodeInfo_Location_Path = 1 // repeated int32
- SourceCodeInfo_Location_Span = 2 // repeated int32
- SourceCodeInfo_Location_LeadingComments = 3 // optional string
- SourceCodeInfo_Location_TrailingComments = 4 // optional string
- SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string
-)
-
-// Field numbers for google.protobuf.GeneratedCodeInfo.
-const (
- GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation
-)
-
-// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
-const (
- GeneratedCodeInfo_Annotation_Path = 1 // repeated int32
- GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string
- GeneratedCodeInfo_Annotation_Begin = 3 // optional int32
- GeneratedCodeInfo_Annotation_End = 4 // optional int32
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
deleted file mode 100644
index e597885..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fieldnum contains constants for field numbers of fields in messages
-// declared in descriptor.proto and any of the well-known types.
-package fieldnum
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
deleted file mode 100644
index 8816c73..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Duration.
-const (
- Duration_Seconds = 1 // optional int64
- Duration_Nanos = 2 // optional int32
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
deleted file mode 100644
index b5130a6..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Empty.
-const ()
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
deleted file mode 100644
index 7e3bfa2..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.FieldMask.
-const (
- FieldMask_Paths = 1 // repeated string
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
deleted file mode 100644
index 241972b..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.SourceContext.
-const (
- SourceContext_FileName = 1 // optional string
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
deleted file mode 100644
index c460aab..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Struct.
-const (
- Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry
-)
-
-// Field numbers for google.protobuf.Struct.FieldsEntry.
-const (
- Struct_FieldsEntry_Key = 1 // optional string
- Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value
-)
-
-// Field numbers for google.protobuf.Value.
-const (
- Value_NullValue = 1 // optional google.protobuf.NullValue
- Value_NumberValue = 2 // optional double
- Value_StringValue = 3 // optional string
- Value_BoolValue = 4 // optional bool
- Value_StructValue = 5 // optional google.protobuf.Struct
- Value_ListValue = 6 // optional google.protobuf.ListValue
-)
-
-// Field numbers for google.protobuf.ListValue.
-const (
- ListValue_Values = 1 // repeated google.protobuf.Value
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
deleted file mode 100644
index b4346fb..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Timestamp.
-const (
- Timestamp_Seconds = 1 // optional int64
- Timestamp_Nanos = 2 // optional int32
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
deleted file mode 100644
index b392e95..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.Type.
-const (
- Type_Name = 1 // optional string
- Type_Fields = 2 // repeated google.protobuf.Field
- Type_Oneofs = 3 // repeated string
- Type_Options = 4 // repeated google.protobuf.Option
- Type_SourceContext = 5 // optional google.protobuf.SourceContext
- Type_Syntax = 6 // optional google.protobuf.Syntax
-)
-
-// Field numbers for google.protobuf.Field.
-const (
- Field_Kind = 1 // optional google.protobuf.Field.Kind
- Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality
- Field_Number = 3 // optional int32
- Field_Name = 4 // optional string
- Field_TypeUrl = 6 // optional string
- Field_OneofIndex = 7 // optional int32
- Field_Packed = 8 // optional bool
- Field_Options = 9 // repeated google.protobuf.Option
- Field_JsonName = 10 // optional string
- Field_DefaultValue = 11 // optional string
-)
-
-// Field numbers for google.protobuf.Enum.
-const (
- Enum_Name = 1 // optional string
- Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue
- Enum_Options = 3 // repeated google.protobuf.Option
- Enum_SourceContext = 4 // optional google.protobuf.SourceContext
- Enum_Syntax = 5 // optional google.protobuf.Syntax
-)
-
-// Field numbers for google.protobuf.EnumValue.
-const (
- EnumValue_Name = 1 // optional string
- EnumValue_Number = 2 // optional int32
- EnumValue_Options = 3 // repeated google.protobuf.Option
-)
-
-// Field numbers for google.protobuf.Option.
-const (
- Option_Name = 1 // optional string
- Option_Value = 2 // optional google.protobuf.Any
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
deleted file mode 100644
index 42f846a..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-protos. DO NOT EDIT.
-
-package fieldnum
-
-// Field numbers for google.protobuf.DoubleValue.
-const (
- DoubleValue_Value = 1 // optional double
-)
-
-// Field numbers for google.protobuf.FloatValue.
-const (
- FloatValue_Value = 1 // optional float
-)
-
-// Field numbers for google.protobuf.Int64Value.
-const (
- Int64Value_Value = 1 // optional int64
-)
-
-// Field numbers for google.protobuf.UInt64Value.
-const (
- UInt64Value_Value = 1 // optional uint64
-)
-
-// Field numbers for google.protobuf.Int32Value.
-const (
- Int32Value_Value = 1 // optional int32
-)
-
-// Field numbers for google.protobuf.UInt32Value.
-const (
- UInt32Value_Value = 1 // optional uint32
-)
-
-// Field numbers for google.protobuf.BoolValue.
-const (
- BoolValue_Value = 1 // optional bool
-)
-
-// Field numbers for google.protobuf.StringValue.
-const (
- StringValue_Value = 1 // optional string
-)
-
-// Field numbers for google.protobuf.BytesValue.
-const (
- BytesValue_Value = 1 // optional bytes
-)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
deleted file mode 100644
index 1cb2d74..0000000
--- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fieldsort defines an ordering of fields.
-//
-// The ordering defined by this package matches the historic behavior of the proto
-// package, placing extensions first and oneofs last.
-//
-// There is no guarantee about stability of the wire encoding, and users should not
-// depend on the order defined in this package as it is subject to change without
-// notice.
-package fieldsort
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// Less returns true if field a comes before field j in ordered wire marshal output.
-func Less(a, b protoreflect.FieldDescriptor) bool {
- ea := a.IsExtension()
- eb := b.IsExtension()
- oa := a.ContainingOneof()
- ob := b.ContainingOneof()
- switch {
- case ea != eb:
- return ea
- case oa != nil && ob != nil:
- if oa == ob {
- return a.Number() < b.Number()
- }
- return oa.Index() < ob.Index()
- case oa != nil:
- return false
- case ob != nil:
- return true
- default:
- return a.Number() < b.Number()
- }
-}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
index 462d384..b293b69 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
@@ -3,11 +3,14 @@
// license that can be found in the LICENSE file.
// Package filedesc provides functionality for constructing descriptors.
+//
+// The types in this package implement interfaces in the protoreflect package
+// related to protobuf descriptors.
package filedesc
import (
"google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
pref "google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
@@ -126,24 +129,24 @@
b = b[m:]
if isFile {
switch num {
- case fieldnum.FileDescriptorProto_EnumType:
+ case genid.FileDescriptorProto_EnumType_field_number:
db.NumEnums++
- case fieldnum.FileDescriptorProto_MessageType:
+ case genid.FileDescriptorProto_MessageType_field_number:
db.unmarshalCounts(v, false)
db.NumMessages++
- case fieldnum.FileDescriptorProto_Extension:
+ case genid.FileDescriptorProto_Extension_field_number:
db.NumExtensions++
- case fieldnum.FileDescriptorProto_Service:
+ case genid.FileDescriptorProto_Service_field_number:
db.NumServices++
}
} else {
switch num {
- case fieldnum.DescriptorProto_EnumType:
+ case genid.DescriptorProto_EnumType_field_number:
db.NumEnums++
- case fieldnum.DescriptorProto_NestedType:
+ case genid.DescriptorProto_NestedType_field_number:
db.unmarshalCounts(v, false)
db.NumMessages++
- case fieldnum.DescriptorProto_Extension:
+ case genid.DescriptorProto_Extension_field_number:
db.NumExtensions++
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index a9fe07c..98ab142 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -13,6 +13,8 @@
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
pref "google.golang.org/protobuf/reflect/protoreflect"
@@ -77,7 +79,7 @@
func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages }
func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions }
func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services }
-func (fd *File) SourceLocations() pref.SourceLocations { return &fd.L2.Locations }
+func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations }
func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *File) ProtoType(pref.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
@@ -98,15 +100,6 @@
fd.mu.Unlock()
}
-// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code
-// to be able to retrieve the raw descriptor.
-//
-// WARNING: This method is exempt from the compatibility promise and may be
-// removed in the future without warning.
-func (fd *File) ProtoLegacyRawDesc() []byte {
- return fd.builder.RawDescriptor
-}
-
// GoPackagePath is a pseudo-internal API for determining the Go package path
// that this file descriptor is declared in.
//
@@ -202,20 +195,21 @@
L1 FieldL1
}
FieldL1 struct {
- Options func() pref.ProtoMessage
- Number pref.FieldNumber
- Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
- Kind pref.Kind
- JSONName jsonName
- IsWeak bool // promoted from google.protobuf.FieldOptions
- HasPacked bool // promoted from google.protobuf.FieldOptions
- IsPacked bool // promoted from google.protobuf.FieldOptions
- HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
- EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
- Default defaultValue
- ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
- Enum pref.EnumDescriptor
- Message pref.MessageDescriptor
+ Options func() pref.ProtoMessage
+ Number pref.FieldNumber
+ Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
+ Kind pref.Kind
+ StringName stringName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsWeak bool // promoted from google.protobuf.FieldOptions
+ HasPacked bool // promoted from google.protobuf.FieldOptions
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
}
Oneof struct {
@@ -275,8 +269,15 @@
func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number }
func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
func (fd *Field) Kind() pref.Kind { return fd.L1.Kind }
-func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has }
-func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) }
+func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
+func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
+func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
+func (fd *Field) HasPresence() bool {
+ return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
+}
+func (fd *Field) HasOptionalKeyword() bool {
+ return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
+}
func (fd *Field) IsPacked() bool {
if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated {
switch fd.L1.Kind {
@@ -295,13 +296,13 @@
if !fd.IsMap() {
return nil
}
- return fd.Message().Fields().ByNumber(1)
+ return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number)
}
func (fd *Field) MapValue() pref.FieldDescriptor {
if !fd.IsMap() {
return nil
}
- return fd.Message().Fields().ByNumber(2)
+ return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number)
}
func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) }
@@ -338,6 +339,9 @@
return fd.L0.ParentFile.L1.Syntax == pref.Proto3
}
+func (od *Oneof) IsSynthetic() bool {
+ return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
+}
func (od *Oneof) Options() pref.ProtoMessage {
if f := od.L1.Options; f != nil {
return f()
@@ -361,12 +365,13 @@
Kind pref.Kind
}
ExtensionL2 struct {
- Options func() pref.ProtoMessage
- JSONName jsonName
- IsPacked bool // promoted from google.protobuf.FieldOptions
- Default defaultValue
- Enum pref.EnumDescriptor
- Message pref.MessageDescriptor
+ Options func() pref.ProtoMessage
+ StringName stringName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
}
)
@@ -376,11 +381,16 @@
}
return descopts.Field
}
-func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
-func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
-func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
-func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has }
-func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) }
+func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
+func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
+func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
+func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON }
+func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) }
+func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) }
+func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated }
+func (xd *Extension) HasOptionalKeyword() bool {
+ return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional
+}
func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
@@ -490,27 +500,50 @@
func (d *Base) IsPlaceholder() bool { return false }
func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
-type jsonName struct {
- has bool
- once sync.Once
- name string
+type stringName struct {
+ hasJSON bool
+ once sync.Once
+ nameJSON string
+ nameText string
}
-// Init initializes the name. It is exported for use by other internal packages.
-func (js *jsonName) Init(s string) {
- js.has = true
- js.name = s
+// InitJSON initializes the name. It is exported for use by other internal packages.
+func (s *stringName) InitJSON(name string) {
+ s.hasJSON = true
+ s.nameJSON = name
}
-func (js *jsonName) get(fd pref.FieldDescriptor) string {
- if !js.has {
- js.once.Do(func() {
- js.name = strs.JSONCamelCase(string(fd.Name()))
- })
- }
- return js.name
+func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName {
+ s.once.Do(func() {
+ if fd.IsExtension() {
+ // For extensions, JSON and text are formatted the same way.
+ var name string
+ if messageset.IsMessageSetExtension(fd) {
+ name = string("[" + fd.FullName().Parent() + "]")
+ } else {
+ name = string("[" + fd.FullName() + "]")
+ }
+ s.nameJSON = name
+ s.nameText = name
+ } else {
+ // Format the JSON name.
+ if !s.hasJSON {
+ s.nameJSON = strs.JSONCamelCase(string(fd.Name()))
+ }
+
+ // Format the text name.
+ s.nameText = string(fd.Name())
+ if fd.Kind() == pref.GroupKind {
+ s.nameText = string(fd.Message().Name())
+ }
+ }
+ })
+ return s
}
+func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON }
+func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText }
+
func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
if b, ok := v.Interface().([]byte); ok {
@@ -592,7 +625,7 @@
// TODO: Avoid panic if we're running with the race detector
// and instead spawn a goroutine that periodically resets
// this value back to the original to induce a race.
- panic("detected mutation on the default bytes")
+ panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName()))
}
return dv.val
}
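// Illustrative sketch (not part of the vendored change): the desc.go hunk replaces
// the old jsonName cache with stringName, which lazily derives both the JSON name
// and the new text-format name of a field (the proto name for ordinary fields, the
// message name for groups, and "[pkg.full_name]" for extensions). Assuming a
// protobuf-go release that exposes TextName on protoreflect.FieldDescriptor, the
// difference is visible on google.protobuf.Any:
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	fd := (&anypb.Any{}).ProtoReflect().Descriptor().Fields().ByName("type_url")
	fmt.Println(fd.TextName()) // "type_url" - name used by the text format
	fmt.Println(fd.JSONName()) // "typeUrl"  - name used by the JSON format
}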
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index c0cddf8..66e1fee 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -8,7 +8,7 @@
"sync"
"google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
@@ -107,7 +107,7 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.FileDescriptorProto_Syntax:
+ case genid.FileDescriptorProto_Syntax_field_number:
switch string(v) {
case "proto2":
fd.L1.Syntax = pref.Proto2
@@ -116,36 +116,36 @@
default:
panic("invalid syntax")
}
- case fieldnum.FileDescriptorProto_Name:
+ case genid.FileDescriptorProto_Name_field_number:
fd.L1.Path = sb.MakeString(v)
- case fieldnum.FileDescriptorProto_Package:
+ case genid.FileDescriptorProto_Package_field_number:
fd.L1.Package = pref.FullName(sb.MakeString(v))
- case fieldnum.FileDescriptorProto_EnumType:
- if prevField != fieldnum.FileDescriptorProto_EnumType {
+ case genid.FileDescriptorProto_EnumType_field_number:
+ if prevField != genid.FileDescriptorProto_EnumType_field_number {
if numEnums > 0 {
panic("non-contiguous repeated field")
}
posEnums = len(b0) - len(b) - n - m
}
numEnums++
- case fieldnum.FileDescriptorProto_MessageType:
- if prevField != fieldnum.FileDescriptorProto_MessageType {
+ case genid.FileDescriptorProto_MessageType_field_number:
+ if prevField != genid.FileDescriptorProto_MessageType_field_number {
if numMessages > 0 {
panic("non-contiguous repeated field")
}
posMessages = len(b0) - len(b) - n - m
}
numMessages++
- case fieldnum.FileDescriptorProto_Extension:
- if prevField != fieldnum.FileDescriptorProto_Extension {
+ case genid.FileDescriptorProto_Extension_field_number:
+ if prevField != genid.FileDescriptorProto_Extension_field_number {
if numExtensions > 0 {
panic("non-contiguous repeated field")
}
posExtensions = len(b0) - len(b) - n - m
}
numExtensions++
- case fieldnum.FileDescriptorProto_Service:
- if prevField != fieldnum.FileDescriptorProto_Service {
+ case genid.FileDescriptorProto_Service_field_number:
+ if prevField != genid.FileDescriptorProto_Service_field_number {
if numServices > 0 {
panic("non-contiguous repeated field")
}
@@ -233,9 +233,9 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.EnumDescriptorProto_Name:
+ case genid.EnumDescriptorProto_Name_field_number:
ed.L0.FullName = appendFullName(sb, pd.FullName(), v)
- case fieldnum.EnumDescriptorProto_Value:
+ case genid.EnumDescriptorProto_Value_field_number:
numValues++
}
default:
@@ -260,7 +260,7 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.EnumDescriptorProto_Value:
+ case genid.EnumDescriptorProto_Value_field_number:
ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i)
i++
}
@@ -288,33 +288,33 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.DescriptorProto_Name:
+ case genid.DescriptorProto_Name_field_number:
md.L0.FullName = appendFullName(sb, pd.FullName(), v)
- case fieldnum.DescriptorProto_EnumType:
- if prevField != fieldnum.DescriptorProto_EnumType {
+ case genid.DescriptorProto_EnumType_field_number:
+ if prevField != genid.DescriptorProto_EnumType_field_number {
if numEnums > 0 {
panic("non-contiguous repeated field")
}
posEnums = len(b0) - len(b) - n - m
}
numEnums++
- case fieldnum.DescriptorProto_NestedType:
- if prevField != fieldnum.DescriptorProto_NestedType {
+ case genid.DescriptorProto_NestedType_field_number:
+ if prevField != genid.DescriptorProto_NestedType_field_number {
if numMessages > 0 {
panic("non-contiguous repeated field")
}
posMessages = len(b0) - len(b) - n - m
}
numMessages++
- case fieldnum.DescriptorProto_Extension:
- if prevField != fieldnum.DescriptorProto_Extension {
+ case genid.DescriptorProto_Extension_field_number:
+ if prevField != genid.DescriptorProto_Extension_field_number {
if numExtensions > 0 {
panic("non-contiguous repeated field")
}
posExtensions = len(b0) - len(b) - n - m
}
numExtensions++
- case fieldnum.DescriptorProto_Options:
+ case genid.DescriptorProto_Options_field_number:
md.unmarshalSeedOptions(v)
}
prevField = num
@@ -375,9 +375,9 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.MessageOptions_MapEntry:
+ case genid.MessageOptions_MapEntry_field_number:
md.L1.IsMapEntry = protowire.DecodeBool(v)
- case fieldnum.MessageOptions_MessageSetWireFormat:
+ case genid.MessageOptions_MessageSetWireFormat_field_number:
md.L1.IsMessageSet = protowire.DecodeBool(v)
}
default:
@@ -400,20 +400,20 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.FieldDescriptorProto_Number:
+ case genid.FieldDescriptorProto_Number_field_number:
xd.L1.Number = pref.FieldNumber(v)
- case fieldnum.FieldDescriptorProto_Label:
+ case genid.FieldDescriptorProto_Label_field_number:
xd.L1.Cardinality = pref.Cardinality(v)
- case fieldnum.FieldDescriptorProto_Type:
+ case genid.FieldDescriptorProto_Type_field_number:
xd.L1.Kind = pref.Kind(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.FieldDescriptorProto_Name:
+ case genid.FieldDescriptorProto_Name_field_number:
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
- case fieldnum.FieldDescriptorProto_Extendee:
+ case genid.FieldDescriptorProto_Extendee_field_number:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
}
default:
@@ -436,7 +436,7 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.ServiceDescriptorProto_Name:
+ case genid.ServiceDescriptorProto_Name_field_number:
sd.L0.FullName = appendFullName(sb, pd.FullName(), v)
}
default:
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index cdf3164..198451e 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -10,7 +10,7 @@
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/descopts"
- "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
@@ -143,35 +143,35 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.FileDescriptorProto_PublicDependency:
+ case genid.FileDescriptorProto_PublicDependency_field_number:
fd.L2.Imports[v].IsPublic = true
- case fieldnum.FileDescriptorProto_WeakDependency:
+ case genid.FileDescriptorProto_WeakDependency_field_number:
fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.FileDescriptorProto_Dependency:
+ case genid.FileDescriptorProto_Dependency_field_number:
path := sb.MakeString(v)
imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
if imp == nil {
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp})
- case fieldnum.FileDescriptorProto_EnumType:
+ case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
- case fieldnum.FileDescriptorProto_MessageType:
+ case genid.FileDescriptorProto_MessageType_field_number:
fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
messageIdx++
- case fieldnum.FileDescriptorProto_Extension:
+ case genid.FileDescriptorProto_Extension_field_number:
fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
- case fieldnum.FileDescriptorProto_Service:
+ case genid.FileDescriptorProto_Service_field_number:
fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb)
serviceIdx++
- case fieldnum.FileDescriptorProto_Options:
+ case genid.FileDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -196,13 +196,13 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.EnumDescriptorProto_Value:
+ case genid.EnumDescriptorProto_Value_field_number:
rawValues = append(rawValues, v)
- case fieldnum.EnumDescriptorProto_ReservedName:
+ case genid.EnumDescriptorProto_ReservedName_field_number:
ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
- case fieldnum.EnumDescriptorProto_ReservedRange:
+ case genid.EnumDescriptorProto_ReservedRange_field_number:
ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
- case fieldnum.EnumDescriptorProto_Options:
+ case genid.EnumDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -228,9 +228,9 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.EnumDescriptorProto_EnumReservedRange_Start:
+ case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number:
r[0] = pref.EnumNumber(v)
- case fieldnum.EnumDescriptorProto_EnumReservedRange_End:
+ case genid.EnumDescriptorProto_EnumReservedRange_End_field_number:
r[1] = pref.EnumNumber(v)
}
default:
@@ -255,17 +255,17 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.EnumValueDescriptorProto_Number:
+ case genid.EnumValueDescriptorProto_Number_field_number:
vd.L1.Number = pref.EnumNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.EnumValueDescriptorProto_Name:
+ case genid.EnumValueDescriptorProto_Name_field_number:
// NOTE: Enum values are in the same scope as the enum parent.
vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v)
- case fieldnum.EnumValueDescriptorProto_Options:
+ case genid.EnumValueDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -289,29 +289,29 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.DescriptorProto_Field:
+ case genid.DescriptorProto_Field_field_number:
rawFields = append(rawFields, v)
- case fieldnum.DescriptorProto_OneofDecl:
+ case genid.DescriptorProto_OneofDecl_field_number:
rawOneofs = append(rawOneofs, v)
- case fieldnum.DescriptorProto_ReservedName:
+ case genid.DescriptorProto_ReservedName_field_number:
md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
- case fieldnum.DescriptorProto_ReservedRange:
+ case genid.DescriptorProto_ReservedRange_field_number:
md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
- case fieldnum.DescriptorProto_ExtensionRange:
+ case genid.DescriptorProto_ExtensionRange_field_number:
r, rawOptions := unmarshalMessageExtensionRange(v)
opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions)
md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r)
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts)
- case fieldnum.DescriptorProto_EnumType:
+ case genid.DescriptorProto_EnumType_field_number:
md.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
- case fieldnum.DescriptorProto_NestedType:
+ case genid.DescriptorProto_NestedType_field_number:
md.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
messageIdx++
- case fieldnum.DescriptorProto_Extension:
+ case genid.DescriptorProto_Extension_field_number:
md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
- case fieldnum.DescriptorProto_Options:
+ case genid.DescriptorProto_Options_field_number:
md.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
@@ -347,9 +347,9 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.MessageOptions_MapEntry:
+ case genid.MessageOptions_MapEntry_field_number:
md.L1.IsMapEntry = protowire.DecodeBool(v)
- case fieldnum.MessageOptions_MessageSetWireFormat:
+ case genid.MessageOptions_MessageSetWireFormat_field_number:
md.L1.IsMessageSet = protowire.DecodeBool(v)
}
default:
@@ -368,9 +368,9 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.DescriptorProto_ReservedRange_Start:
+ case genid.DescriptorProto_ReservedRange_Start_field_number:
r[0] = pref.FieldNumber(v)
- case fieldnum.DescriptorProto_ReservedRange_End:
+ case genid.DescriptorProto_ReservedRange_End_field_number:
r[1] = pref.FieldNumber(v)
}
default:
@@ -390,16 +390,16 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.DescriptorProto_ExtensionRange_Start:
+ case genid.DescriptorProto_ExtensionRange_Start_field_number:
r[0] = pref.FieldNumber(v)
- case fieldnum.DescriptorProto_ExtensionRange_End:
+ case genid.DescriptorProto_ExtensionRange_End_field_number:
r[1] = pref.FieldNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.DescriptorProto_ExtensionRange_Options:
+ case genid.DescriptorProto_ExtensionRange_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -425,13 +425,13 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.FieldDescriptorProto_Number:
+ case genid.FieldDescriptorProto_Number_field_number:
fd.L1.Number = pref.FieldNumber(v)
- case fieldnum.FieldDescriptorProto_Label:
+ case genid.FieldDescriptorProto_Label_field_number:
fd.L1.Cardinality = pref.Cardinality(v)
- case fieldnum.FieldDescriptorProto_Type:
+ case genid.FieldDescriptorProto_Type_field_number:
fd.L1.Kind = pref.Kind(v)
- case fieldnum.FieldDescriptorProto_OneofIndex:
+ case genid.FieldDescriptorProto_OneofIndex_field_number:
// In Message.unmarshalFull, we allocate slices for both
// the field and oneof descriptors before unmarshaling either
// of them. This ensures pointers to slice elements are stable.
@@ -441,20 +441,22 @@
panic("oneof type already set")
}
fd.L1.ContainingOneof = od
+ case genid.FieldDescriptorProto_Proto3Optional_field_number:
+ fd.L1.IsProto3Optional = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.FieldDescriptorProto_Name:
+ case genid.FieldDescriptorProto_Name_field_number:
fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
- case fieldnum.FieldDescriptorProto_JsonName:
- fd.L1.JSONName.Init(sb.MakeString(v))
- case fieldnum.FieldDescriptorProto_DefaultValue:
+ case genid.FieldDescriptorProto_JsonName_field_number:
+ fd.L1.StringName.InitJSON(sb.MakeString(v))
+ case genid.FieldDescriptorProto_DefaultValue_field_number:
fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
- case fieldnum.FieldDescriptorProto_TypeName:
+ case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
- case fieldnum.FieldDescriptorProto_Options:
+ case genid.FieldDescriptorProto_Options_field_number:
fd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
@@ -486,10 +488,10 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.FieldOptions_Packed:
+ case genid.FieldOptions_Packed_field_number:
fd.L1.HasPacked = true
fd.L1.IsPacked = protowire.DecodeBool(v)
- case fieldnum.FieldOptions_Weak:
+ case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.HasEnforceUTF8 = true
@@ -516,9 +518,9 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.OneofDescriptorProto_Name:
+ case genid.OneofDescriptorProto_Name_field_number:
od.L0.FullName = appendFullName(sb, pd.FullName(), v)
- case fieldnum.OneofDescriptorProto_Options:
+ case genid.OneofDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -537,17 +539,24 @@
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_Proto3Optional_field_number:
+ xd.L2.IsProto3Optional = protowire.DecodeBool(v)
+ }
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.FieldDescriptorProto_JsonName:
- xd.L2.JSONName.Init(sb.MakeString(v))
- case fieldnum.FieldDescriptorProto_DefaultValue:
+ case genid.FieldDescriptorProto_JsonName_field_number:
+ xd.L2.StringName.InitJSON(sb.MakeString(v))
+ case genid.FieldDescriptorProto_DefaultValue_field_number:
xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
- case fieldnum.FieldDescriptorProto_TypeName:
+ case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
- case fieldnum.FieldDescriptorProto_Options:
+ case genid.FieldDescriptorProto_Options_field_number:
xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
@@ -577,7 +586,7 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.FieldOptions_Packed:
+ case genid.FieldOptions_Packed_field_number:
xd.L2.IsPacked = protowire.DecodeBool(v)
}
default:
@@ -599,9 +608,9 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.ServiceDescriptorProto_Method:
+ case genid.ServiceDescriptorProto_Method_field_number:
rawMethods = append(rawMethods, v)
- case fieldnum.ServiceDescriptorProto_Options:
+ case genid.ServiceDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -632,22 +641,22 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
- case fieldnum.MethodDescriptorProto_ClientStreaming:
+ case genid.MethodDescriptorProto_ClientStreaming_field_number:
md.L1.IsStreamingClient = protowire.DecodeBool(v)
- case fieldnum.MethodDescriptorProto_ServerStreaming:
+ case genid.MethodDescriptorProto_ServerStreaming_field_number:
md.L1.IsStreamingServer = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case fieldnum.MethodDescriptorProto_Name:
+ case genid.MethodDescriptorProto_Name_field_number:
md.L0.FullName = appendFullName(sb, pd.FullName(), v)
- case fieldnum.MethodDescriptorProto_InputType:
+ case genid.MethodDescriptorProto_InputType_field_number:
md.L1.Input = PlaceholderMessage(makeFullName(sb, v))
- case fieldnum.MethodDescriptorProto_OutputType:
+ case genid.MethodDescriptorProto_OutputType_field_number:
md.L1.Output = PlaceholderMessage(makeFullName(sb, v))
- case fieldnum.MethodDescriptorProto_Options:
+ case genid.MethodDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
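// Illustrative sketch (not part of the vendored change): desc_lazy.go now records
// the proto3_optional bit (FieldDescriptorProto field 17) while unmarshaling, which
// feeds the HasOptionalKeyword and HasPresence accessors added in desc.go. Assuming
// a protobuf-go release that exposes these methods on protoreflect.FieldDescriptor,
// a proto2 optional field reports presence while a plain proto3 scalar does not:
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// proto2 file: "optional string name = 1;" in FieldDescriptorProto.
	p2 := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor().Fields().ByName("name")
	fmt.Println(p2.HasOptionalKeyword(), p2.HasPresence()) // true true

	// proto3 file: "string type_url = 1;" in Any - no optional keyword, no presence.
	p3 := (&anypb.Any{}).ProtoReflect().Descriptor().Fields().ByName("type_url")
	fmt.Println(p3.HasOptionalKeyword(), p3.HasPresence()) // false false
}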
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
index 1b7089b..aa294ff 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
@@ -10,6 +10,8 @@
"sort"
"sync"
+ "google.golang.org/protobuf/internal/genid"
+
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/errors"
@@ -185,10 +187,7 @@
// Unlike the FieldNumber.IsValid method, it allows ranges that cover the
// reserved number range.
func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool {
- if isMessageSet {
- return protowire.MinValidNumber <= n && n <= math.MaxInt32
- }
- return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber
+ return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet)
}
// CheckOverlap reports an error if p and q overlap.
@@ -249,6 +248,7 @@
once sync.Once
byName map[pref.Name]pref.FieldDescriptor // protected by once
byJSON map[string]pref.FieldDescriptor // protected by once
+ byText map[string]pref.FieldDescriptor // protected by once
byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once
}
@@ -256,6 +256,7 @@
func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] }
func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] }
func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] }
+func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] }
func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
@@ -265,11 +266,13 @@
if len(p.List) > 0 {
p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
+ p.byText = make(map[string]pref.FieldDescriptor, len(p.List))
p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
for _, f := range p.List {
// Field names and numbers are guaranteed to be unique.
p.byName[f.Name()] = f
p.byJSON[f.JSONName()] = f
+ p.byText[f.TextName()] = f
p.byNum[f.Number()] = f
}
}
@@ -278,9 +281,170 @@
}
type SourceLocations struct {
+ // List is a list of SourceLocations.
+ // The SourceLocation.Next field does not need to be populated
+ // as it will be lazily populated upon first need.
List []pref.SourceLocation
+
+ // File is the parent file descriptor that these locations are relative to.
+ // If non-nil, ByDescriptor verifies that the provided descriptor
+ // is a child of this file descriptor.
+ File pref.FileDescriptor
+
+ once sync.Once
+ byPath map[pathKey]int
}
-func (p *SourceLocations) Len() int { return len(p.List) }
-func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] }
+func (p *SourceLocations) Len() int { return len(p.List) }
+func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] }
+func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation {
+ if i, ok := p.lazyInit().byPath[k]; ok {
+ return p.List[i]
+ }
+ return pref.SourceLocation{}
+}
+func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation {
+ return p.byKey(newPathKey(path))
+}
+func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation {
+ if p.File != nil && desc != nil && p.File != desc.ParentFile() {
+ return pref.SourceLocation{} // mismatching parent files
+ }
+ var pathArr [16]int32
+ path := pathArr[:0]
+ for {
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ // Reverse the path since it was constructed in reverse.
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+ path[i], path[j] = path[j], path[i]
+ }
+ return p.byKey(newPathKey(path))
+ case pref.MessageDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number))
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_NestedType_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.FieldDescriptor:
+ isExtension := desc.(pref.FieldDescriptor).IsExtension()
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ if isExtension {
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_Extension_field_number))
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_Extension_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ } else {
+ switch desc.(type) {
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_Field_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ }
+ case pref.OneofDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.EnumDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number))
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_EnumType_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.EnumValueDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.EnumDescriptor:
+ path = append(path, int32(genid.EnumDescriptorProto_Value_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.ServiceDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_Service_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.MethodDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.ServiceDescriptor:
+ path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ default:
+ return pref.SourceLocation{}
+ }
+ }
+}
+func (p *SourceLocations) lazyInit() *SourceLocations {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ // Collect all the indexes for a given path.
+ pathIdxs := make(map[pathKey][]int, len(p.List))
+ for i, l := range p.List {
+ k := newPathKey(l.Path)
+ pathIdxs[k] = append(pathIdxs[k], i)
+ }
+
+ // Update the next index for all locations.
+ p.byPath = make(map[pathKey]int, len(p.List))
+ for k, idxs := range pathIdxs {
+ for i := 0; i < len(idxs)-1; i++ {
+ p.List[idxs[i]].Next = idxs[i+1]
+ }
+ p.List[idxs[len(idxs)-1]].Next = 0
+ p.byPath[k] = idxs[0] // record the first location for this path
+ }
+ }
+ })
+ return p
+}
func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+ arr [16]uint8 // first n-1 path segments; last element is the length
+ str string // used if the path does not fit in arr
+}
+
+func newPathKey(p pref.SourcePath) (k pathKey) {
+ if len(p) < len(k.arr) {
+ for i, ps := range p {
+ if ps < 0 || math.MaxUint8 <= ps {
+ return pathKey{str: p.String()}
+ }
+ k.arr[i] = uint8(ps)
+ }
+ k.arr[len(k.arr)-1] = uint8(len(p))
+ return k
+ }
+ return pathKey{str: p.String()}
+}
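// Illustrative sketch (not part of the vendored change): the new
// SourceLocations.ByDescriptor above rebuilds the SourceCodeInfo path for a
// descriptor by walking up its parents, using the same genid field numbers the
// descriptors are parsed with. Assuming a protobuf-go release that exposes
// ByDescriptor/ByPath on protoreflect.SourceLocations, the two lookups below are
// equivalent for the first field of the first top-level message
// (path [4, 0, 2, 0]: message_type=4, index 0, field=2, index 0). Generated .pb.go
// files usually strip SourceCodeInfo, so both return a zero SourceLocation here;
// descriptors built by protodesc from a FileDescriptorProto that retains source
// info return real locations.
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	file := (&anypb.Any{}).ProtoReflect().Descriptor().ParentFile()
	field := file.Messages().Get(0).Fields().Get(0)

	byDesc := file.SourceLocations().ByDescriptor(field)
	byPath := file.SourceLocations().ByPath(protoreflect.SourcePath{4, 0, 2, 0})
	fmt.Println(byDesc.Path, byPath.Path) // identical (empty here, since source info is stripped)
}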
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
index 6a8825e..30db19f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -142,6 +142,7 @@
once sync.Once
byName map[protoreflect.Name]*Field // protected by once
byJSON map[string]*Field // protected by once
+ byText map[string]*Field // protected by once
byNum map[protoreflect.FieldNumber]*Field // protected by once
}
@@ -163,6 +164,12 @@
}
return nil
}
+func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byText[s]; d != nil {
+ return d
+ }
+ return nil
+}
func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
if d := p.lazyInit().byNum[n]; d != nil {
return d
@@ -178,6 +185,7 @@
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Field, len(p.List))
p.byJSON = make(map[string]*Field, len(p.List))
+ p.byText = make(map[string]*Field, len(p.List))
p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
for i := range p.List {
d := &p.List[i]
@@ -187,6 +195,9 @@
if _, ok := p.byJSON[d.JSONName()]; !ok {
p.byJSON[d.JSONName()] = d
}
+ if _, ok := p.byText[d.TextName()]; !ok {
+ p.byText[d.TextName()] = d
+ }
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
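// Illustrative sketch (not part of the vendored change): the generated Fields list
// above gains a byText index so text-format field names resolve without scanning.
// Assuming a protobuf-go release that exposes ByTextName on
// protoreflect.FieldDescriptors, JSON and text names are looked up independently:
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	fields := (&anypb.Any{}).ProtoReflect().Descriptor().Fields()
	fmt.Println(fields.ByJSONName("typeUrl").Number())  // 1
	fmt.Println(fields.ByTextName("type_url").Number()) // 1
	fmt.Println(fields.ByTextName("typeUrl"))           // <nil> - a JSON name, not a text name
}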
diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go
new file mode 100644
index 0000000..e6f7d47
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_any_proto = "google/protobuf/any.proto"
+
+// Names for google.protobuf.Any.
+const (
+ Any_message_name protoreflect.Name = "Any"
+ Any_message_fullname protoreflect.FullName = "google.protobuf.Any"
+)
+
+// Field names for google.protobuf.Any.
+const (
+ Any_TypeUrl_field_name protoreflect.Name = "type_url"
+ Any_Value_field_name protoreflect.Name = "value"
+
+ Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url"
+ Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value"
+)
+
+// Field numbers for google.protobuf.Any.
+const (
+ Any_TypeUrl_field_number protoreflect.FieldNumber = 1
+ Any_Value_field_number protoreflect.FieldNumber = 2
+)
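// Illustrative sketch (not part of the vendored change): the new genid packages give
// the runtime named constants (e.g. Any_TypeUrl_field_number = 1) for the well-known
// types so internal code stops hard-coding field numbers. genid is internal and
// cannot be imported here, but the same names and numbers are visible through the
// public descriptor API:
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	md := (&anypb.Any{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName())                            // google.protobuf.Any
	fmt.Println(md.Fields().ByName("type_url").Number())  // 1
	fmt.Println(md.Fields().ByName("value").Number())     // 2
}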
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
new file mode 100644
index 0000000..df8f918
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -0,0 +1,106 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_api_proto = "google/protobuf/api.proto"
+
+// Names for google.protobuf.Api.
+const (
+ Api_message_name protoreflect.Name = "Api"
+ Api_message_fullname protoreflect.FullName = "google.protobuf.Api"
+)
+
+// Field names for google.protobuf.Api.
+const (
+ Api_Name_field_name protoreflect.Name = "name"
+ Api_Methods_field_name protoreflect.Name = "methods"
+ Api_Options_field_name protoreflect.Name = "options"
+ Api_Version_field_name protoreflect.Name = "version"
+ Api_SourceContext_field_name protoreflect.Name = "source_context"
+ Api_Mixins_field_name protoreflect.Name = "mixins"
+ Api_Syntax_field_name protoreflect.Name = "syntax"
+
+ Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
+ Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
+ Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options"
+ Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version"
+ Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
+ Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
+ Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
+)
+
+// Field numbers for google.protobuf.Api.
+const (
+ Api_Name_field_number protoreflect.FieldNumber = 1
+ Api_Methods_field_number protoreflect.FieldNumber = 2
+ Api_Options_field_number protoreflect.FieldNumber = 3
+ Api_Version_field_number protoreflect.FieldNumber = 4
+ Api_SourceContext_field_number protoreflect.FieldNumber = 5
+ Api_Mixins_field_number protoreflect.FieldNumber = 6
+ Api_Syntax_field_number protoreflect.FieldNumber = 7
+)
+
+// Names for google.protobuf.Method.
+const (
+ Method_message_name protoreflect.Name = "Method"
+ Method_message_fullname protoreflect.FullName = "google.protobuf.Method"
+)
+
+// Field names for google.protobuf.Method.
+const (
+ Method_Name_field_name protoreflect.Name = "name"
+ Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url"
+ Method_RequestStreaming_field_name protoreflect.Name = "request_streaming"
+ Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url"
+ Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
+ Method_Options_field_name protoreflect.Name = "options"
+ Method_Syntax_field_name protoreflect.Name = "syntax"
+
+ Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
+ Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
+ Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming"
+ Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url"
+ Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
+ Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
+ Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
+)
+
+// Field numbers for google.protobuf.Method.
+const (
+ Method_Name_field_number protoreflect.FieldNumber = 1
+ Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2
+ Method_RequestStreaming_field_number protoreflect.FieldNumber = 3
+ Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4
+ Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
+ Method_Options_field_number protoreflect.FieldNumber = 6
+ Method_Syntax_field_number protoreflect.FieldNumber = 7
+)
+
+// Names for google.protobuf.Mixin.
+const (
+ Mixin_message_name protoreflect.Name = "Mixin"
+ Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin"
+)
+
+// Field names for google.protobuf.Mixin.
+const (
+ Mixin_Name_field_name protoreflect.Name = "name"
+ Mixin_Root_field_name protoreflect.Name = "root"
+
+ Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name"
+ Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root"
+)
+
+// Field numbers for google.protobuf.Mixin.
+const (
+ Mixin_Name_field_number protoreflect.FieldNumber = 1
+ Mixin_Root_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
new file mode 100644
index 0000000..e3cdf1c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -0,0 +1,829 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
+
+// Names for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
+ FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet"
+)
+
+// Field names for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_File_field_name protoreflect.Name = "file"
+
+ FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file"
+)
+
+// Field numbers for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto"
+ FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto"
+)
+
+// Field names for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_Name_field_name protoreflect.Name = "name"
+ FileDescriptorProto_Package_field_name protoreflect.Name = "package"
+ FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency"
+ FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
+ FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency"
+ FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type"
+ FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
+ FileDescriptorProto_Service_field_name protoreflect.Name = "service"
+ FileDescriptorProto_Extension_field_name protoreflect.Name = "extension"
+ FileDescriptorProto_Options_field_name protoreflect.Name = "options"
+ FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info"
+ FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax"
+
+ FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name"
+ FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package"
+ FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
+ FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
+ FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+ FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
+ FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
+ FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
+ FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension"
+ FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options"
+ FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info"
+ FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax"
+)
+
+// Field numbers for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2
+ FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3
+ FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
+ FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11
+ FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4
+ FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5
+ FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6
+ FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7
+ FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
+ FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
+ FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
+)
+
+// Names for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_message_name protoreflect.Name = "DescriptorProto"
+ DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto"
+)
+
+// Field names for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_Name_field_name protoreflect.Name = "name"
+ DescriptorProto_Field_field_name protoreflect.Name = "field"
+ DescriptorProto_Extension_field_name protoreflect.Name = "extension"
+ DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type"
+ DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
+ DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range"
+ DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl"
+ DescriptorProto_Options_field_name protoreflect.Name = "options"
+ DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
+ DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+
+ DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name"
+ DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field"
+ DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension"
+ DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type"
+ DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type"
+ DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range"
+ DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl"
+ DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options"
+ DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
+ DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+)
+
+// Field numbers for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ DescriptorProto_Field_field_number protoreflect.FieldNumber = 2
+ DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6
+ DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3
+ DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4
+ DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5
+ DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8
+ DescriptorProto_Options_field_number protoreflect.FieldNumber = 7
+ DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9
+ DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10
+)
+
+// Names for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange"
+ DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange"
+)
+
+// Field names for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start"
+ DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end"
+ DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options"
+
+ DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start"
+ DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end"
+ DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options"
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1
+ DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2
+ DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange"
+ DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange"
+)
+
+// Field names for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start"
+ DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end"
+
+ DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start"
+ DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end"
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1
+ DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions"
+ ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions"
+)
+
+// Field names for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto"
+ FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto"
+)
+
+// Field names for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_Name_field_name protoreflect.Name = "name"
+ FieldDescriptorProto_Number_field_name protoreflect.Name = "number"
+ FieldDescriptorProto_Label_field_name protoreflect.Name = "label"
+ FieldDescriptorProto_Type_field_name protoreflect.Name = "type"
+ FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name"
+ FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee"
+ FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value"
+ FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index"
+ FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name"
+ FieldDescriptorProto_Options_field_name protoreflect.Name = "options"
+ FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional"
+
+ FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name"
+ FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number"
+ FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label"
+ FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type"
+ FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name"
+ FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee"
+ FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value"
+ FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index"
+ FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name"
+ FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options"
+ FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional"
+)
+
+// Field numbers for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3
+ FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4
+ FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5
+ FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6
+ FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2
+ FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7
+ FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9
+ FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10
+ FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
+ FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17
+)
+
+// Full and short names for google.protobuf.FieldDescriptorProto.Type.
+const (
+ FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type"
+ FieldDescriptorProto_Type_enum_name = "Type"
+)
+
+// Full and short names for google.protobuf.FieldDescriptorProto.Label.
+const (
+ FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label"
+ FieldDescriptorProto_Label_enum_name = "Label"
+)
+
+// Names for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto"
+ OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto"
+)
+
+// Field names for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_Name_field_name protoreflect.Name = "name"
+ OneofDescriptorProto_Options_field_name protoreflect.Name = "options"
+
+ OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name"
+ OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options"
+)
+
+// Field numbers for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto"
+ EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto"
+)
+
+// Field names for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_Name_field_name protoreflect.Name = "name"
+ EnumDescriptorProto_Value_field_name protoreflect.Name = "value"
+ EnumDescriptorProto_Options_field_name protoreflect.Name = "options"
+ EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
+ EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+
+ EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
+ EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
+ EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
+ EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
+ EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2
+ EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
+ EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
+ EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5
+)
+
+// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange"
+ EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange"
+)
+
+// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start"
+ EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end"
+
+ EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start"
+ EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end"
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1
+ EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto"
+ EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto"
+)
+
+// Field names for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name"
+ EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number"
+ EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options"
+
+ EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name"
+ EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number"
+ EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options"
+)
+
+// Field numbers for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2
+ EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto"
+ ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto"
+)
+
+// Field names for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_Name_field_name protoreflect.Name = "name"
+ ServiceDescriptorProto_Method_field_name protoreflect.Name = "method"
+ ServiceDescriptorProto_Options_field_name protoreflect.Name = "options"
+
+ ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name"
+ ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method"
+ ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options"
+)
+
+// Field numbers for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2
+ ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto"
+ MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto"
+)
+
+// Field names for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_Name_field_name protoreflect.Name = "name"
+ MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type"
+ MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type"
+ MethodDescriptorProto_Options_field_name protoreflect.Name = "options"
+ MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming"
+ MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming"
+
+ MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name"
+ MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type"
+ MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type"
+ MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options"
+ MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming"
+ MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming"
+)
+
+// Field numbers for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2
+ MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3
+ MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4
+ MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5
+ MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6
+)
+
+// Names for google.protobuf.FileOptions.
+const (
+ FileOptions_message_name protoreflect.Name = "FileOptions"
+ FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions"
+)
+
+// Field names for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage_field_name protoreflect.Name = "java_package"
+ FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname"
+ FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files"
+ FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash"
+ FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8"
+ FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for"
+ FileOptions_GoPackage_field_name protoreflect.Name = "go_package"
+ FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services"
+ FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services"
+ FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services"
+ FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services"
+ FileOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas"
+ FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix"
+ FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace"
+ FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix"
+ FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix"
+ FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
+ FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
+ FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
+ FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
+ FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname"
+ FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files"
+ FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash"
+ FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8"
+ FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for"
+ FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package"
+ FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services"
+ FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services"
+ FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services"
+ FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services"
+ FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated"
+ FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas"
+ FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix"
+ FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace"
+ FileOptions_SwiftPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.swift_prefix"
+ FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix"
+ FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
+ FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
+ FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
+ FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1
+ FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8
+ FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10
+ FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20
+ FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27
+ FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9
+ FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11
+ FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16
+ FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17
+ FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18
+ FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42
+ FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23
+ FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31
+ FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36
+ FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37
+ FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39
+ FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40
+ FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
+ FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
+ FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
+ FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.FileOptions.OptimizeMode.
+const (
+ FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode"
+ FileOptions_OptimizeMode_enum_name = "OptimizeMode"
+)
+
+// Names for google.protobuf.MessageOptions.
+const (
+ MessageOptions_message_name protoreflect.Name = "MessageOptions"
+ MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions"
+)
+
+// Field names for google.protobuf.MessageOptions.
+const (
+ MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
+ MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
+ MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
+ MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
+ MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
+ MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
+ MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
+ MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.MessageOptions.
+const (
+ MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
+ MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
+ MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
+ MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.FieldOptions.
+const (
+ FieldOptions_message_name protoreflect.Name = "FieldOptions"
+ FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions"
+)
+
+// Field names for google.protobuf.FieldOptions.
+const (
+ FieldOptions_Ctype_field_name protoreflect.Name = "ctype"
+ FieldOptions_Packed_field_name protoreflect.Name = "packed"
+ FieldOptions_Jstype_field_name protoreflect.Name = "jstype"
+ FieldOptions_Lazy_field_name protoreflect.Name = "lazy"
+ FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ FieldOptions_Weak_field_name protoreflect.Name = "weak"
+ FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
+ FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed"
+ FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype"
+ FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy"
+ FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated"
+ FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
+ FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.FieldOptions.
+const (
+ FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1
+ FieldOptions_Packed_field_number protoreflect.FieldNumber = 2
+ FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6
+ FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5
+ FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
+ FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.FieldOptions.CType.
+const (
+ FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType"
+ FieldOptions_CType_enum_name = "CType"
+)
+
+// Full and short names for google.protobuf.FieldOptions.JSType.
+const (
+ FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType"
+ FieldOptions_JSType_enum_name = "JSType"
+)
+
+// Names for google.protobuf.OneofOptions.
+const (
+ OneofOptions_message_name protoreflect.Name = "OneofOptions"
+ OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions"
+)
+
+// Field names for google.protobuf.OneofOptions.
+const (
+ OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.OneofOptions.
+const (
+ OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.EnumOptions.
+const (
+ EnumOptions_message_name protoreflect.Name = "EnumOptions"
+ EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions"
+)
+
+// Field names for google.protobuf.EnumOptions.
+const (
+ EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
+ EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
+ EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
+ EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.EnumOptions.
+const (
+ EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
+ EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions"
+ EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions"
+)
+
+// Field names for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
+ EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
+ EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_message_name protoreflect.Name = "ServiceOptions"
+ ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions"
+)
+
+// Field names for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated"
+ ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33
+ ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.MethodOptions.
+const (
+ MethodOptions_message_name protoreflect.Name = "MethodOptions"
+ MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions"
+)
+
+// Field names for google.protobuf.MethodOptions.
+const (
+ MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level"
+ MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated"
+ MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level"
+ MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.MethodOptions.
+const (
+ MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33
+ MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34
+ MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel.
+const (
+ MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel"
+ MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel"
+)
+
+// Names for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption"
+ UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption"
+)
+
+// Field names for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_Name_field_name protoreflect.Name = "name"
+ UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value"
+ UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value"
+ UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value"
+ UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value"
+ UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value"
+ UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value"
+
+ UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name"
+ UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value"
+ UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value"
+ UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value"
+ UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value"
+ UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value"
+ UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value"
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2
+ UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3
+ UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4
+ UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5
+ UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6
+ UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7
+ UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8
+)
+
+// Names for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart"
+ UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart"
+)
+
+// Field names for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part"
+ UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension"
+
+ UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part"
+ UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension"
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1
+ UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo"
+ SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo"
+)
+
+// Field names for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_Location_field_name protoreflect.Name = "location"
+
+ SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location"
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_message_name protoreflect.Name = "Location"
+ SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location"
+)
+
+// Field names for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path"
+ SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span"
+ SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments"
+ SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments"
+ SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments"
+
+ SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path"
+ SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span"
+ SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments"
+ SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments"
+ SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments"
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1
+ SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2
+ SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3
+ SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4
+ SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6
+)
+
+// Names for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo"
+ GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo"
+)
+
+// Field names for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation"
+
+ GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation"
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation"
+ GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation"
+)
+
+// Field names for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path"
+ GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file"
+ GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin"
+ GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end"
+
+ GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path"
+ GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file"
+ GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin"
+ GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end"
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1
+ GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2
+ GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3
+ GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4
+)
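
The descriptor field numbers above are what the runtime dispatches on when it walks raw FileDescriptorProto bytes. A minimal sketch of that pattern, with hypothetical package and function names, assuming code that lives inside the google.golang.org/protobuf module (genid is an internal package and cannot be imported from outside it):

package sketch // hypothetical

import (
	"google.golang.org/protobuf/internal/genid"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// fieldLabel switches on FieldDescriptorProto field numbers, the same kind of
// dispatch the module's descriptor parsing does instead of importing descriptorpb.
func fieldLabel(num protoreflect.FieldNumber) string {
	switch num {
	case genid.FieldDescriptorProto_Name_field_number:
		return "name"
	case genid.FieldDescriptorProto_Number_field_number:
		return "number"
	case genid.FieldDescriptorProto_TypeName_field_number:
		return "type_name"
	default:
		return "other"
	}
}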
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
new file mode 100644
index 0000000..45ccd01
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package genid contains constants for declarations in descriptor.proto
+// and the well-known types.
+package genid
+
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+
+const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
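
A minimal sketch of how the package-level constant above can be used, with a hypothetical function name and under the assumption that the caller also lives inside the protobuf module (internal packages are not importable externally):

package sketch // hypothetical

import (
	"google.golang.org/protobuf/internal/genid"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// inWellKnownPackage reports whether md is declared directly under the
// google.protobuf package, e.g. a well-known type such as Timestamp.
func inWellKnownPackage(md protoreflect.MessageDescriptor) bool {
	return md.FullName().Parent() == genid.GoogleProtobuf_package
}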
diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go
new file mode 100644
index 0000000..b070ef4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_duration_proto = "google/protobuf/duration.proto"
+
+// Names for google.protobuf.Duration.
+const (
+ Duration_message_name protoreflect.Name = "Duration"
+ Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration"
+)
+
+// Field names for google.protobuf.Duration.
+const (
+ Duration_Seconds_field_name protoreflect.Name = "seconds"
+ Duration_Nanos_field_name protoreflect.Name = "nanos"
+
+ Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds"
+ Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos"
+)
+
+// Field numbers for google.protobuf.Duration.
+const (
+ Duration_Seconds_field_number protoreflect.FieldNumber = 1
+ Duration_Nanos_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go
new file mode 100644
index 0000000..762abb3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_empty_proto = "google/protobuf/empty.proto"
+
+// Names for google.protobuf.Empty.
+const (
+ Empty_message_name protoreflect.Name = "Empty"
+ Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go
new file mode 100644
index 0000000..70bed45
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto"
+
+// Names for google.protobuf.FieldMask.
+const (
+ FieldMask_message_name protoreflect.Name = "FieldMask"
+ FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask"
+)
+
+// Field names for google.protobuf.FieldMask.
+const (
+ FieldMask_Paths_field_name protoreflect.Name = "paths"
+
+ FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths"
+)
+
+// Field numbers for google.protobuf.FieldMask.
+const (
+ FieldMask_Paths_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
new file mode 100644
index 0000000..693d2e9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+// Go names of implementation-specific struct fields in generated messages.
+const (
+ State_goname = "state"
+
+ SizeCache_goname = "sizeCache"
+ SizeCacheA_goname = "XXX_sizecache"
+
+ WeakFields_goname = "weakFields"
+ WeakFieldsA_goname = "XXX_weak"
+
+ UnknownFields_goname = "unknownFields"
+ UnknownFieldsA_goname = "XXX_unrecognized"
+
+ ExtensionFields_goname = "extensionFields"
+ ExtensionFieldsA_goname = "XXX_InternalExtensions"
+ ExtensionFieldsB_goname = "XXX_extensions"
+
+ WeakFieldPrefix_goname = "XXX_weak_"
+)
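
These Go identifiers describe the shape of generated message structs. A rough, hypothetical example of a message as protoc-gen-go v1.26 emits it (the unexported fields carry the *_goname names; the XXX_* variants are the legacy spellings located by reflection):

package sketch // hypothetical

import "google.golang.org/protobuf/runtime/protoimpl"

// ExampleMessage is illustrative only; real generated code also includes
// descriptor metadata and accessor methods.
type ExampleMessage struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
}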
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
new file mode 100644
index 0000000..8f9ea02
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+
+// Generic field names and numbers for synthetic map entry messages.
+const (
+ MapEntry_Key_field_name protoreflect.Name = "key"
+ MapEntry_Value_field_name protoreflect.Name = "value"
+
+ MapEntry_Key_field_number protoreflect.FieldNumber = 1
+ MapEntry_Value_field_number protoreflect.FieldNumber = 2
+)
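
Proto map fields are represented as repeated synthetic entry messages whose key and value are always fields 1 and 2. A minimal sketch with a hypothetical function name, using only the public protoreflect API:

package sketch // hypothetical

import "google.golang.org/protobuf/reflect/protoreflect"

// mapEntryNumbers returns the field numbers of a map field's synthetic entry
// message: 1 for "key" and 2 for "value", matching the MapEntry_* constants.
func mapEntryNumbers(fd protoreflect.FieldDescriptor) (key, value protoreflect.FieldNumber) {
	if !fd.IsMap() {
		return 0, 0
	}
	return fd.MapKey().Number(), fd.MapValue().Number()
}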
diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go
new file mode 100644
index 0000000..3e99ae1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto"
+
+// Names for google.protobuf.SourceContext.
+const (
+ SourceContext_message_name protoreflect.Name = "SourceContext"
+ SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext"
+)
+
+// Field names for google.protobuf.SourceContext.
+const (
+ SourceContext_FileName_field_name protoreflect.Name = "file_name"
+
+ SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name"
+)
+
+// Field numbers for google.protobuf.SourceContext.
+const (
+ SourceContext_FileName_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go
new file mode 100644
index 0000000..1a38944
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_struct_proto = "google/protobuf/struct.proto"
+
+// Full and short names for google.protobuf.NullValue.
+const (
+ NullValue_enum_fullname = "google.protobuf.NullValue"
+ NullValue_enum_name = "NullValue"
+)
+
+// Names for google.protobuf.Struct.
+const (
+ Struct_message_name protoreflect.Name = "Struct"
+ Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct"
+)
+
+// Field names for google.protobuf.Struct.
+const (
+ Struct_Fields_field_name protoreflect.Name = "fields"
+
+ Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields"
+)
+
+// Field numbers for google.protobuf.Struct.
+const (
+ Struct_Fields_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry"
+ Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry"
+)
+
+// Field names for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_Key_field_name protoreflect.Name = "key"
+ Struct_FieldsEntry_Value_field_name protoreflect.Name = "value"
+
+ Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key"
+ Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value"
+)
+
+// Field numbers for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1
+ Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.Value.
+const (
+ Value_message_name protoreflect.Name = "Value"
+ Value_message_fullname protoreflect.FullName = "google.protobuf.Value"
+)
+
+// Field names for google.protobuf.Value.
+const (
+ Value_NullValue_field_name protoreflect.Name = "null_value"
+ Value_NumberValue_field_name protoreflect.Name = "number_value"
+ Value_StringValue_field_name protoreflect.Name = "string_value"
+ Value_BoolValue_field_name protoreflect.Name = "bool_value"
+ Value_StructValue_field_name protoreflect.Name = "struct_value"
+ Value_ListValue_field_name protoreflect.Name = "list_value"
+
+ Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value"
+ Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value"
+ Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value"
+ Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value"
+ Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value"
+ Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value"
+)
+
+// Field numbers for google.protobuf.Value.
+const (
+ Value_NullValue_field_number protoreflect.FieldNumber = 1
+ Value_NumberValue_field_number protoreflect.FieldNumber = 2
+ Value_StringValue_field_number protoreflect.FieldNumber = 3
+ Value_BoolValue_field_number protoreflect.FieldNumber = 4
+ Value_StructValue_field_number protoreflect.FieldNumber = 5
+ Value_ListValue_field_number protoreflect.FieldNumber = 6
+)
+
+// Oneof names for google.protobuf.Value.
+const (
+ Value_Kind_oneof_name protoreflect.Name = "kind"
+
+ Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind"
+)
+
+// Names for google.protobuf.ListValue.
+const (
+ ListValue_message_name protoreflect.Name = "ListValue"
+ ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue"
+)
+
+// Field names for google.protobuf.ListValue.
+const (
+ ListValue_Values_field_name protoreflect.Name = "values"
+
+ ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values"
+)
+
+// Field numbers for google.protobuf.ListValue.
+const (
+ ListValue_Values_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go
new file mode 100644
index 0000000..f5cd563
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto"
+
+// Names for google.protobuf.Timestamp.
+const (
+ Timestamp_message_name protoreflect.Name = "Timestamp"
+ Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp"
+)
+
+// Field names for google.protobuf.Timestamp.
+const (
+ Timestamp_Seconds_field_name protoreflect.Name = "seconds"
+ Timestamp_Nanos_field_name protoreflect.Name = "nanos"
+
+ Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds"
+ Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos"
+)
+
+// Field numbers for google.protobuf.Timestamp.
+const (
+ Timestamp_Seconds_field_number protoreflect.FieldNumber = 1
+ Timestamp_Nanos_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go
new file mode 100644
index 0000000..3bc7101
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go
@@ -0,0 +1,184 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_type_proto = "google/protobuf/type.proto"
+
+// Full and short names for google.protobuf.Syntax.
+const (
+ Syntax_enum_fullname = "google.protobuf.Syntax"
+ Syntax_enum_name = "Syntax"
+)
+
+// Names for google.protobuf.Type.
+const (
+ Type_message_name protoreflect.Name = "Type"
+ Type_message_fullname protoreflect.FullName = "google.protobuf.Type"
+)
+
+// Field names for google.protobuf.Type.
+const (
+ Type_Name_field_name protoreflect.Name = "name"
+ Type_Fields_field_name protoreflect.Name = "fields"
+ Type_Oneofs_field_name protoreflect.Name = "oneofs"
+ Type_Options_field_name protoreflect.Name = "options"
+ Type_SourceContext_field_name protoreflect.Name = "source_context"
+ Type_Syntax_field_name protoreflect.Name = "syntax"
+
+ Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name"
+ Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields"
+ Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs"
+ Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options"
+ Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context"
+ Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax"
+)
+
+// Field numbers for google.protobuf.Type.
+const (
+ Type_Name_field_number protoreflect.FieldNumber = 1
+ Type_Fields_field_number protoreflect.FieldNumber = 2
+ Type_Oneofs_field_number protoreflect.FieldNumber = 3
+ Type_Options_field_number protoreflect.FieldNumber = 4
+ Type_SourceContext_field_number protoreflect.FieldNumber = 5
+ Type_Syntax_field_number protoreflect.FieldNumber = 6
+)
+
+// Names for google.protobuf.Field.
+const (
+ Field_message_name protoreflect.Name = "Field"
+ Field_message_fullname protoreflect.FullName = "google.protobuf.Field"
+)
+
+// Field names for google.protobuf.Field.
+const (
+ Field_Kind_field_name protoreflect.Name = "kind"
+ Field_Cardinality_field_name protoreflect.Name = "cardinality"
+ Field_Number_field_name protoreflect.Name = "number"
+ Field_Name_field_name protoreflect.Name = "name"
+ Field_TypeUrl_field_name protoreflect.Name = "type_url"
+ Field_OneofIndex_field_name protoreflect.Name = "oneof_index"
+ Field_Packed_field_name protoreflect.Name = "packed"
+ Field_Options_field_name protoreflect.Name = "options"
+ Field_JsonName_field_name protoreflect.Name = "json_name"
+ Field_DefaultValue_field_name protoreflect.Name = "default_value"
+
+ Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind"
+ Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality"
+ Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number"
+ Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name"
+ Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url"
+ Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index"
+ Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed"
+ Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options"
+ Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name"
+ Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value"
+)
+
+// Field numbers for google.protobuf.Field.
+const (
+ Field_Kind_field_number protoreflect.FieldNumber = 1
+ Field_Cardinality_field_number protoreflect.FieldNumber = 2
+ Field_Number_field_number protoreflect.FieldNumber = 3
+ Field_Name_field_number protoreflect.FieldNumber = 4
+ Field_TypeUrl_field_number protoreflect.FieldNumber = 6
+ Field_OneofIndex_field_number protoreflect.FieldNumber = 7
+ Field_Packed_field_number protoreflect.FieldNumber = 8
+ Field_Options_field_number protoreflect.FieldNumber = 9
+ Field_JsonName_field_number protoreflect.FieldNumber = 10
+ Field_DefaultValue_field_number protoreflect.FieldNumber = 11
+)
+
+// Full and short names for google.protobuf.Field.Kind.
+const (
+ Field_Kind_enum_fullname = "google.protobuf.Field.Kind"
+ Field_Kind_enum_name = "Kind"
+)
+
+// Full and short names for google.protobuf.Field.Cardinality.
+const (
+ Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality"
+ Field_Cardinality_enum_name = "Cardinality"
+)
+
+// Names for google.protobuf.Enum.
+const (
+ Enum_message_name protoreflect.Name = "Enum"
+ Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum"
+)
+
+// Field names for google.protobuf.Enum.
+const (
+ Enum_Name_field_name protoreflect.Name = "name"
+ Enum_Enumvalue_field_name protoreflect.Name = "enumvalue"
+ Enum_Options_field_name protoreflect.Name = "options"
+ Enum_SourceContext_field_name protoreflect.Name = "source_context"
+ Enum_Syntax_field_name protoreflect.Name = "syntax"
+
+ Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name"
+ Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue"
+ Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options"
+ Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context"
+ Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax"
+)
+
+// Field numbers for google.protobuf.Enum.
+const (
+ Enum_Name_field_number protoreflect.FieldNumber = 1
+ Enum_Enumvalue_field_number protoreflect.FieldNumber = 2
+ Enum_Options_field_number protoreflect.FieldNumber = 3
+ Enum_SourceContext_field_number protoreflect.FieldNumber = 4
+ Enum_Syntax_field_number protoreflect.FieldNumber = 5
+)
+
+// Names for google.protobuf.EnumValue.
+const (
+ EnumValue_message_name protoreflect.Name = "EnumValue"
+ EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue"
+)
+
+// Field names for google.protobuf.EnumValue.
+const (
+ EnumValue_Name_field_name protoreflect.Name = "name"
+ EnumValue_Number_field_name protoreflect.Name = "number"
+ EnumValue_Options_field_name protoreflect.Name = "options"
+
+ EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name"
+ EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number"
+ EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options"
+)
+
+// Field numbers for google.protobuf.EnumValue.
+const (
+ EnumValue_Name_field_number protoreflect.FieldNumber = 1
+ EnumValue_Number_field_number protoreflect.FieldNumber = 2
+ EnumValue_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.Option.
+const (
+ Option_message_name protoreflect.Name = "Option"
+ Option_message_fullname protoreflect.FullName = "google.protobuf.Option"
+)
+
+// Field names for google.protobuf.Option.
+const (
+ Option_Name_field_name protoreflect.Name = "name"
+ Option_Value_field_name protoreflect.Name = "value"
+
+ Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name"
+ Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value"
+)
+
+// Field numbers for google.protobuf.Option.
+const (
+ Option_Name_field_number protoreflect.FieldNumber = 1
+ Option_Value_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
new file mode 100644
index 0000000..429384b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+
+// Generic field name and number for messages in wrappers.proto.
+const (
+ WrapperValue_Value_field_name protoreflect.Name = "value"
+ WrapperValue_Value_field_number protoreflect.FieldNumber = 1
+)
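
Every wrapper message in wrappers.proto carries exactly one field, "value", numbered 1. A small sketch with a hypothetical function name, using the public wrapperspb helpers:

package sketch // hypothetical

import "google.golang.org/protobuf/types/known/wrapperspb"

// newStringWrapper builds a google.protobuf.StringValue; its single "value"
// field is the one recorded by the constants above.
func newStringWrapper(s string) *wrapperspb.StringValue {
	return wrapperspb.String(s)
}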
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go
new file mode 100644
index 0000000..72527d2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go
@@ -0,0 +1,175 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto"
+
+// Names for google.protobuf.DoubleValue.
+const (
+ DoubleValue_message_name protoreflect.Name = "DoubleValue"
+ DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue"
+)
+
+// Field names for google.protobuf.DoubleValue.
+const (
+ DoubleValue_Value_field_name protoreflect.Name = "value"
+
+ DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value"
+)
+
+// Field numbers for google.protobuf.DoubleValue.
+const (
+ DoubleValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.FloatValue.
+const (
+ FloatValue_message_name protoreflect.Name = "FloatValue"
+ FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue"
+)
+
+// Field names for google.protobuf.FloatValue.
+const (
+ FloatValue_Value_field_name protoreflect.Name = "value"
+
+ FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value"
+)
+
+// Field numbers for google.protobuf.FloatValue.
+const (
+ FloatValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.Int64Value.
+const (
+ Int64Value_message_name protoreflect.Name = "Int64Value"
+ Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value"
+)
+
+// Field names for google.protobuf.Int64Value.
+const (
+ Int64Value_Value_field_name protoreflect.Name = "value"
+
+ Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value"
+)
+
+// Field numbers for google.protobuf.Int64Value.
+const (
+ Int64Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.UInt64Value.
+const (
+ UInt64Value_message_name protoreflect.Name = "UInt64Value"
+ UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value"
+)
+
+// Field names for google.protobuf.UInt64Value.
+const (
+ UInt64Value_Value_field_name protoreflect.Name = "value"
+
+ UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value"
+)
+
+// Field numbers for google.protobuf.UInt64Value.
+const (
+ UInt64Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.Int32Value.
+const (
+ Int32Value_message_name protoreflect.Name = "Int32Value"
+ Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value"
+)
+
+// Field names for google.protobuf.Int32Value.
+const (
+ Int32Value_Value_field_name protoreflect.Name = "value"
+
+ Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value"
+)
+
+// Field numbers for google.protobuf.Int32Value.
+const (
+ Int32Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.UInt32Value.
+const (
+ UInt32Value_message_name protoreflect.Name = "UInt32Value"
+ UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value"
+)
+
+// Field names for google.protobuf.UInt32Value.
+const (
+ UInt32Value_Value_field_name protoreflect.Name = "value"
+
+ UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value"
+)
+
+// Field numbers for google.protobuf.UInt32Value.
+const (
+ UInt32Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.BoolValue.
+const (
+ BoolValue_message_name protoreflect.Name = "BoolValue"
+ BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue"
+)
+
+// Field names for google.protobuf.BoolValue.
+const (
+ BoolValue_Value_field_name protoreflect.Name = "value"
+
+ BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value"
+)
+
+// Field numbers for google.protobuf.BoolValue.
+const (
+ BoolValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.StringValue.
+const (
+ StringValue_message_name protoreflect.Name = "StringValue"
+ StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue"
+)
+
+// Field names for google.protobuf.StringValue.
+const (
+ StringValue_Value_field_name protoreflect.Name = "value"
+
+ StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value"
+)
+
+// Field numbers for google.protobuf.StringValue.
+const (
+ StringValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.BytesValue.
+const (
+ BytesValue_message_name protoreflect.Name = "BytesValue"
+ BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue"
+)
+
+// Field names for google.protobuf.BytesValue.
+const (
+ BytesValue_Value_field_name protoreflect.Name = "value"
+
+ BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value"
+)
+
+// Field numbers for google.protobuf.BytesValue.
+const (
+ BytesValue_Value_field_number protoreflect.FieldNumber = 1
+)
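
The wrappers_gen.go constants above belong to the internal genid package, so they cannot be imported directly; they simply mirror what the public reflection API reports for the well-known wrapper messages. A minimal sketch of the same names and numbers observed through protoreflect (the wrapperspb import and the specific message chosen are illustrative only):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Descriptor of google.protobuf.StringValue, obtained via its generated type.
	md := (&wrapperspb.StringValue{}).ProtoReflect().Descriptor()

	// Every wrapper message has exactly one field, named "value" with number 1,
	// which is what the generated genid constants encode.
	fd := md.Fields().ByNumber(1)
	fmt.Println(md.FullName()) // google.protobuf.StringValue
	fmt.Println(fd.Name())     // value
	fmt.Println(fd.Number())   // 1
}
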
diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go
deleted file mode 100644
index f45509f..0000000
--- a/vendor/google.golang.org/protobuf/internal/genname/name.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package genname contains constants for generated names.
-package genname
-
-const (
- State = "state"
-
- SizeCache = "sizeCache"
- SizeCacheA = "XXX_sizecache"
-
- WeakFields = "weakFields"
- WeakFieldsA = "XXX_weak"
-
- UnknownFields = "unknownFields"
- UnknownFieldsA = "XXX_unrecognized"
-
- ExtensionFields = "extensionFields"
- ExtensionFieldsA = "XXX_InternalExtensions"
- ExtensionFieldsB = "XXX_extensions"
-
- WeakFieldPrefix = "XXX_weak_"
-)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index 4d22c96..abee5f3 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -10,6 +10,7 @@
"strconv"
"google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
piface "google.golang.org/protobuf/runtime/protoiface"
@@ -19,6 +20,12 @@
// functions that we do not want to appear in godoc.
type Export struct{}
+// NewError formats a string according to the format specifier and arguments and
+// returns an error that has a "proto" prefix.
+func (Export) NewError(f string, x ...interface{}) error {
+ return errors.New(f, x...)
+}
+
// enum is any enum type generated by protoc-gen-go
// and must be a named int32 type.
type enum = interface{}
@@ -160,7 +167,7 @@
if mv := (Export{}).protoMessageV2Of(m); mv != nil {
return mv.ProtoReflect().Type()
}
- return legacyLoadMessageInfo(reflect.TypeOf(m), "")
+ return legacyLoadMessageType(reflect.TypeOf(m), "")
}
// MessageStringOf returns the message value as a string,
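
The new Export.NewError hook above delegates to the module-internal errors package, whose errors carry a "proto" prefix so failures raised from generated code are recognizable as protobuf runtime errors. internal/errors cannot be imported from outside the module, so the following is only a rough sketch of the observable behavior, with fmt.Errorf standing in for the internal constructor:

package main

import "fmt"

// newProtoError is an illustrative stand-in for the internal helper:
// format a message and give it a "proto:" prefix.
func newProtoError(format string, args ...interface{}) error {
	return fmt.Errorf("proto: "+format, args...)
}

func main() {
	err := newProtoError("invalid field number %d", 0)
	fmt.Println(err) // proto: invalid field number 0
}
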
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index c00744d..cb4b482 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -10,6 +10,7 @@
"sync"
"google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
@@ -20,6 +21,7 @@
func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" }
func (errInvalidUTF8) InvalidUTF8() bool { return true }
+func (errInvalidUTF8) Unwrap() error { return errors.Error }
// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof.
//
@@ -242,7 +244,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if p.Elem().IsNil() {
p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
@@ -276,7 +278,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
Buf: v,
@@ -420,7 +422,7 @@
}
b, n := protowire.ConsumeGroup(num, b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
Buf: b,
@@ -494,7 +496,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
mp := pointerOfIface(m)
@@ -550,7 +552,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
mp := reflect.New(goType.Elem())
o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
@@ -613,7 +615,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return pref.Value{}, out, protowire.ParseError(n)
+ return pref.Value{}, out, errDecode
}
m := list.NewElement()
o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
@@ -681,7 +683,7 @@
}
b, n := protowire.ConsumeGroup(num, b)
if n < 0 {
- return pref.Value{}, out, protowire.ParseError(n)
+ return pref.Value{}, out, errDecode
}
m := list.NewElement()
o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
@@ -767,7 +769,7 @@
}
b, n := protowire.ConsumeGroup(num, b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
mp := reflect.New(goType.Elem())
o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
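
Earlier in this file, errInvalidUTF8 gains an Unwrap method returning the runtime's sentinel error, so errors.Is can recognize UTF-8 validation failures alongside other protobuf errors. A self-contained sketch of that pattern using only the standard library (the names sentinelError and invalidUTF8Error are illustrative, not the vendored identifiers):

package main

import (
	"errors"
	"fmt"
)

// sentinelError plays the role of the package-wide sentinel that all
// protobuf runtime errors unwrap to.
var sentinelError = errors.New("protobuf error")

// invalidUTF8Error mirrors errInvalidUTF8: a distinct error type that
// still reports as the sentinel through Unwrap.
type invalidUTF8Error struct{}

func (invalidUTF8Error) Error() string { return "string field contains invalid UTF-8" }
func (invalidUTF8Error) Unwrap() error { return sentinelError }

func main() {
	var err error = invalidUTF8Error{}
	fmt.Println(errors.Is(err, sentinelError)) // true: Unwrap links it to the sentinel
}
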
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
index 2c43b11..1a509b6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -15,13 +15,13 @@
)
// sizeBool returns the size of wire encoding a bool pointer as a Bool.
-func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Bool()
return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
}
// appendBool wire encodes a bool pointer as a Bool.
-func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Bool()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, protowire.EncodeBool(v))
@@ -29,7 +29,7 @@
}
// consumeBool wire decodes a bool pointer as a Bool.
-func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -45,7 +45,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Bool() = protowire.DecodeBool(v)
out.n = n
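
The change repeated throughout this file replaces protowire.ParseError(n) with the shared errDecode value whenever ConsumeVarint, ConsumeBytes, or ConsumeGroup reports a negative length. The consume-and-check contract itself is part of the public protowire API; a minimal sketch of the same pattern (errDecode is internal, so a local error stands in for it):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// errDecodeSketch stands in for the internal errDecode sentinel.
var errDecodeSketch = errors.New("cannot parse invalid wire-format data")

// decodeBoolField decodes one varint-encoded bool, mirroring the n < 0
// check used by the generated fast-path coders above.
func decodeBoolField(b []byte) (bool, int, error) {
	v, n := protowire.ConsumeVarint(b)
	if n < 0 {
		// protowire.ParseError(n) would describe the failure in detail;
		// the vendored coders now return one shared error value instead.
		return false, 0, errDecodeSketch
	}
	return protowire.DecodeBool(v), n, nil
}

func main() {
	b := protowire.AppendVarint(nil, protowire.EncodeBool(true))
	v, n, err := decodeBoolField(b)
	fmt.Println(v, n, err) // true 1 <nil>

	_, _, err = decodeBoolField([]byte{0x80}) // truncated varint
	fmt.Println(err)                          // cannot parse invalid wire-format data
}
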
@@ -61,7 +61,7 @@
// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool.
// The zero value is not encoded.
-func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Bool()
if v == false {
return 0
@@ -71,7 +71,7 @@
// appendBoolNoZero wire encodes a bool pointer as a Bool.
// The zero value is not encoded.
-func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Bool()
if v == false {
return b, nil
@@ -90,14 +90,14 @@
// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool.
// It panics if the pointer is nil.
-func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.BoolPtr()
return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
}
// appendBoolPtr wire encodes a *bool pointer as a Bool.
// It panics if the pointer is nil.
-func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.BoolPtr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, protowire.EncodeBool(v))
@@ -105,7 +105,7 @@
}
// consumeBoolPtr wire decodes a *bool pointer as a Bool.
-func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -121,7 +121,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.BoolPtr()
if *vp == nil {
@@ -140,7 +140,7 @@
}
// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool.
-func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.BoolSlice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
@@ -149,7 +149,7 @@
}
// appendBoolSlice encodes a []bool pointer as a repeated Bool.
-func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.BoolSlice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -159,13 +159,13 @@
}
// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool.
-func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.BoolSlice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -180,7 +180,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, protowire.DecodeBool(v))
b = b[n:]
@@ -204,7 +204,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, protowire.DecodeBool(v))
out.n = n
@@ -219,7 +219,7 @@
}
// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool.
-func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.BoolSlice()
if len(s) == 0 {
return 0
@@ -232,7 +232,7 @@
}
// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool.
-func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.BoolSlice()
if len(s) == 0 {
return b, nil
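
appendBoolPackedSlice writes the whole repeated field as one length-delimited record: a single tag, a byte length, then the varint-encoded elements back to back. The same layout can be produced with the public protowire helpers; a short sketch (field number 1 is chosen arbitrarily):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	values := []bool{true, false, true}

	// Encode the elements first so the length prefix is known.
	var payload []byte
	for _, v := range values {
		payload = protowire.AppendVarint(payload, protowire.EncodeBool(v))
	}

	// Packed repeated fields use the length-delimited (bytes) wire type.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.BytesType)
	b = protowire.AppendBytes(b, payload)

	fmt.Printf("% x\n", b) // 0a 03 01 00 01
}
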
@@ -257,19 +257,19 @@
}
// sizeBoolValue returns the size of wire encoding a bool value as a Bool.
-func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
}
// appendBoolValue encodes a bool value as a Bool.
-func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
return b, nil
}
// consumeBoolValue decodes a bool value as a Bool.
-func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -285,7 +285,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil
@@ -299,7 +299,7 @@
}
// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool.
-func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -309,7 +309,7 @@
}
// appendBoolSliceValue encodes a []bool value as a repeated Bool.
-func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -320,12 +320,12 @@
}
// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool.
-func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -340,7 +340,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
b = b[n:]
@@ -363,7 +363,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
out.n = n
@@ -378,7 +378,7 @@
}
// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool.
-func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -393,7 +393,7 @@
}
// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool.
-func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -421,19 +421,19 @@
}
// sizeEnumValue returns the size of wire encoding a value as a Enum.
-func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(uint64(v.Enum()))
}
// appendEnumValue encodes a value as a Enum.
-func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, uint64(v.Enum()))
return b, nil
}
// consumeEnumValue decodes a value as a Enum.
-func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -449,7 +449,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil
@@ -463,7 +463,7 @@
}
// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum.
-func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -473,7 +473,7 @@
}
// appendEnumSliceValue encodes a [] value as a repeated Enum.
-func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -484,12 +484,12 @@
}
// consumeEnumSliceValue wire decodes a [] value as a repeated Enum.
-func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -504,7 +504,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
b = b[n:]
@@ -527,7 +527,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
out.n = n
@@ -542,7 +542,7 @@
}
// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum.
-func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -557,7 +557,7 @@
}
// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum.
-func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -585,13 +585,13 @@
}
// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32.
-func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int32()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
// appendInt32 wire encodes a int32 pointer as a Int32.
-func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int32()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
@@ -599,7 +599,7 @@
}
// consumeInt32 wire decodes a int32 pointer as a Int32.
-func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -615,7 +615,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Int32() = int32(v)
out.n = n
@@ -631,7 +631,7 @@
// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32.
// The zero value is not encoded.
-func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int32()
if v == 0 {
return 0
@@ -641,7 +641,7 @@
// appendInt32NoZero wire encodes a int32 pointer as a Int32.
// The zero value is not encoded.
-func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int32()
if v == 0 {
return b, nil
@@ -660,14 +660,14 @@
// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32.
// It panics if the pointer is nil.
-func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.Int32Ptr()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
// appendInt32Ptr wire encodes a *int32 pointer as a Int32.
// It panics if the pointer is nil.
-func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Int32Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
@@ -675,7 +675,7 @@
}
// consumeInt32Ptr wire decodes a *int32 pointer as a Int32.
-func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -691,7 +691,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Int32Ptr()
if *vp == nil {
@@ -710,7 +710,7 @@
}
// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32.
-func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int32Slice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(uint64(v))
@@ -719,7 +719,7 @@
}
// appendInt32Slice encodes a []int32 pointer as a repeated Int32.
-func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int32Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -729,13 +729,13 @@
}
// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32.
-func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -750,7 +750,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, int32(v))
b = b[n:]
@@ -774,7 +774,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, int32(v))
out.n = n
@@ -789,7 +789,7 @@
}
// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32.
-func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int32Slice()
if len(s) == 0 {
return 0
@@ -802,7 +802,7 @@
}
// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32.
-func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int32Slice()
if len(s) == 0 {
return b, nil
@@ -827,19 +827,19 @@
}
// sizeInt32Value returns the size of wire encoding a int32 value as a Int32.
-func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
}
// appendInt32Value encodes a int32 value as a Int32.
-func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, uint64(int32(v.Int())))
return b, nil
}
// consumeInt32Value decodes a int32 value as a Int32.
-func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -855,7 +855,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfInt32(int32(v)), out, nil
@@ -869,7 +869,7 @@
}
// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32.
-func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -879,7 +879,7 @@
}
// appendInt32SliceValue encodes a []int32 value as a repeated Int32.
-func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -890,12 +890,12 @@
}
// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32.
-func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -910,7 +910,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(v)))
b = b[n:]
@@ -933,7 +933,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(v)))
out.n = n
@@ -948,7 +948,7 @@
}
// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32.
-func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -963,7 +963,7 @@
}
// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32.
-func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -991,13 +991,13 @@
}
// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32.
-func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int32()
return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
}
// appendSint32 wire encodes a int32 pointer as a Sint32.
-func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int32()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
@@ -1005,7 +1005,7 @@
}
// consumeSint32 wire decodes a int32 pointer as a Sint32.
-func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -1021,7 +1021,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32))
out.n = n
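
The Sint32 coders differ from the Int32 ones only in that values pass through zigzag encoding, which maps small negative numbers to small unsigned varints; the math.MaxUint32 mask above keeps decoding within 32-bit range. A quick round trip with the public helpers:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	for _, v := range []int32{0, -1, 1, -2, math.MinInt32} {
		enc := protowire.EncodeZigZag(int64(v))
		dec := int32(protowire.DecodeZigZag(enc & math.MaxUint32))
		fmt.Printf("%d -> zigzag %d -> %d\n", v, enc, dec)
	}
}
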
@@ -1037,7 +1037,7 @@
// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32.
// The zero value is not encoded.
-func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int32()
if v == 0 {
return 0
@@ -1047,7 +1047,7 @@
// appendSint32NoZero wire encodes a int32 pointer as a Sint32.
// The zero value is not encoded.
-func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int32()
if v == 0 {
return b, nil
@@ -1066,14 +1066,14 @@
// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32.
// It panics if the pointer is nil.
-func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.Int32Ptr()
return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
}
// appendSint32Ptr wire encodes a *int32 pointer as a Sint32.
// It panics if the pointer is nil.
-func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Int32Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
@@ -1081,7 +1081,7 @@
}
// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32.
-func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -1097,7 +1097,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Int32Ptr()
if *vp == nil {
@@ -1116,7 +1116,7 @@
}
// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32.
-func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int32Slice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
@@ -1125,7 +1125,7 @@
}
// appendSint32Slice encodes a []int32 pointer as a repeated Sint32.
-func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int32Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -1135,13 +1135,13 @@
}
// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32.
-func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -1156,7 +1156,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
b = b[n:]
@@ -1180,7 +1180,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
out.n = n
@@ -1195,7 +1195,7 @@
}
// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32.
-func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int32Slice()
if len(s) == 0 {
return 0
@@ -1208,7 +1208,7 @@
}
// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32.
-func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int32Slice()
if len(s) == 0 {
return b, nil
@@ -1233,19 +1233,19 @@
}
// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32.
-func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
}
// appendSint32Value encodes a int32 value as a Sint32.
-func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
return b, nil
}
// consumeSint32Value decodes a int32 value as a Sint32.
-func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -1261,7 +1261,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil
@@ -1275,7 +1275,7 @@
}
// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32.
-func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -1285,7 +1285,7 @@
}
// appendSint32SliceValue encodes a []int32 value as a repeated Sint32.
-func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -1296,12 +1296,12 @@
}
// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32.
-func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -1316,7 +1316,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
b = b[n:]
@@ -1339,7 +1339,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
out.n = n
@@ -1354,7 +1354,7 @@
}
// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32.
-func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -1369,7 +1369,7 @@
}
// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32.
-func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -1397,13 +1397,13 @@
}
// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32.
-func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Uint32()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
// appendUint32 wire encodes a uint32 pointer as a Uint32.
-func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint32()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
@@ -1411,7 +1411,7 @@
}
// consumeUint32 wire decodes a uint32 pointer as a Uint32.
-func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -1427,7 +1427,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Uint32() = uint32(v)
out.n = n
@@ -1443,7 +1443,7 @@
// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32.
// The zero value is not encoded.
-func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Uint32()
if v == 0 {
return 0
@@ -1453,7 +1453,7 @@
// appendUint32NoZero wire encodes a uint32 pointer as a Uint32.
// The zero value is not encoded.
-func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint32()
if v == 0 {
return b, nil
@@ -1472,14 +1472,14 @@
// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32.
// It panics if the pointer is nil.
-func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.Uint32Ptr()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32.
// It panics if the pointer is nil.
-func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Uint32Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
@@ -1487,7 +1487,7 @@
}
// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32.
-func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -1503,7 +1503,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Uint32Ptr()
if *vp == nil {
@@ -1522,7 +1522,7 @@
}
// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32.
-func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint32Slice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(uint64(v))
@@ -1531,7 +1531,7 @@
}
// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32.
-func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint32Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -1541,13 +1541,13 @@
}
// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32.
-func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -1562,7 +1562,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, uint32(v))
b = b[n:]
@@ -1586,7 +1586,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, uint32(v))
out.n = n
@@ -1601,7 +1601,7 @@
}
// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32.
-func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint32Slice()
if len(s) == 0 {
return 0
@@ -1614,7 +1614,7 @@
}
// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32.
-func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint32Slice()
if len(s) == 0 {
return b, nil
@@ -1639,19 +1639,19 @@
}
// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32.
-func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
}
// appendUint32Value encodes a uint32 value as a Uint32.
-func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
return b, nil
}
// consumeUint32Value decodes a uint32 value as a Uint32.
-func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -1667,7 +1667,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfUint32(uint32(v)), out, nil
@@ -1681,7 +1681,7 @@
}
// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32.
-func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -1691,7 +1691,7 @@
}
// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32.
-func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -1702,12 +1702,12 @@
}
// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32.
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -1722,7 +1722,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint32(uint32(v)))
b = b[n:]
@@ -1745,7 +1745,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint32(uint32(v)))
out.n = n
@@ -1760,7 +1760,7 @@
}
// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32.
-func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -1775,7 +1775,7 @@
}
// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32.
-func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -1803,13 +1803,13 @@
}
// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64.
-func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int64()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
// appendInt64 wire encodes a int64 pointer as a Int64.
-func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int64()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
@@ -1817,7 +1817,7 @@
}
// consumeInt64 wire decodes a int64 pointer as a Int64.
-func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -1833,7 +1833,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Int64() = int64(v)
out.n = n
@@ -1849,7 +1849,7 @@
// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64.
// The zero value is not encoded.
-func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int64()
if v == 0 {
return 0
@@ -1859,7 +1859,7 @@
// appendInt64NoZero wire encodes a int64 pointer as a Int64.
// The zero value is not encoded.
-func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int64()
if v == 0 {
return b, nil
@@ -1878,14 +1878,14 @@
// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64.
// It panics if the pointer is nil.
-func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.Int64Ptr()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
// appendInt64Ptr wire encodes a *int64 pointer as a Int64.
// It panics if the pointer is nil.
-func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Int64Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
@@ -1893,7 +1893,7 @@
}
// consumeInt64Ptr wire decodes a *int64 pointer as a Int64.
-func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -1909,7 +1909,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Int64Ptr()
if *vp == nil {
@@ -1928,7 +1928,7 @@
}
// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64.
-func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int64Slice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(uint64(v))
@@ -1937,7 +1937,7 @@
}
// appendInt64Slice encodes a []int64 pointer as a repeated Int64.
-func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int64Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -1947,13 +1947,13 @@
}
// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64.
-func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -1968,7 +1968,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, int64(v))
b = b[n:]
@@ -1992,7 +1992,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, int64(v))
out.n = n
@@ -2007,7 +2007,7 @@
}
// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64.
-func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int64Slice()
if len(s) == 0 {
return 0
@@ -2020,7 +2020,7 @@
}
// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64.
-func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int64Slice()
if len(s) == 0 {
return b, nil
@@ -2045,19 +2045,19 @@
}
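
// Illustrative sketch — not part of the vendored file or of this patch. The
// *PackedSlice coders above emit one length-delimited record holding every
// element, instead of repeating the field tag per element. The same layout can
// be built directly with protowire; the field number 4 used here is arbitrary.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	vals := []int64{3, 270, 86942}

	// Packed form: tag once (BytesType), then a length-prefixed run of varints.
	var payload []byte
	for _, v := range vals {
		payload = protowire.AppendVarint(payload, uint64(v))
	}
	packed := protowire.AppendTag(nil, 4, protowire.BytesType)
	packed = protowire.AppendBytes(packed, payload)

	// Unpacked form: the tag is repeated before every element (VarintType).
	var unpacked []byte
	for _, v := range vals {
		unpacked = protowire.AppendTag(unpacked, 4, protowire.VarintType)
		unpacked = protowire.AppendVarint(unpacked, uint64(v))
	}

	fmt.Println(len(packed), len(unpacked)) // 8 9 — packing saves the per-element tags
}
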
// sizeInt64Value returns the size of wire encoding a int64 value as a Int64.
-func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(uint64(v.Int()))
}
// appendInt64Value encodes a int64 value as a Int64.
-func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, uint64(v.Int()))
return b, nil
}
// consumeInt64Value decodes a int64 value as a Int64.
-func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -2073,7 +2073,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfInt64(int64(v)), out, nil
@@ -2087,7 +2087,7 @@
}
// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64.
-func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -2097,7 +2097,7 @@
}
// appendInt64SliceValue encodes a []int64 value as a repeated Int64.
-func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -2108,12 +2108,12 @@
}
// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64.
-func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -2128,7 +2128,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt64(int64(v)))
b = b[n:]
@@ -2151,7 +2151,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt64(int64(v)))
out.n = n
@@ -2166,7 +2166,7 @@
}
// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64.
-func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -2181,7 +2181,7 @@
}
// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64.
-func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -2209,13 +2209,13 @@
}
// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64.
-func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int64()
return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
}
// appendSint64 wire encodes a int64 pointer as a Sint64.
-func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int64()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
@@ -2223,7 +2223,7 @@
}
// consumeSint64 wire decodes a int64 pointer as a Sint64.
-func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -2239,7 +2239,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Int64() = protowire.DecodeZigZag(v)
out.n = n
@@ -2255,7 +2255,7 @@
// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64.
// The zero value is not encoded.
-func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int64()
if v == 0 {
return 0
@@ -2265,7 +2265,7 @@
// appendSint64NoZero wire encodes a int64 pointer as a Sint64.
// The zero value is not encoded.
-func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int64()
if v == 0 {
return b, nil
@@ -2284,14 +2284,14 @@
// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64.
// It panics if the pointer is nil.
-func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.Int64Ptr()
return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
}
// appendSint64Ptr wire encodes a *int64 pointer as a Sint64.
// It panics if the pointer is nil.
-func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Int64Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
@@ -2299,7 +2299,7 @@
}
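
// Illustrative sketch — not part of the vendored file or of this patch. The
// Sint64 coders differ from the Int64 ones only in that they zig-zag encode
// the value so that small negative numbers stay small on the wire; protowire
// exposes the same transform these coders call.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	for _, v := range []int64{0, -1, 1, -2, 64, -64} {
		enc := protowire.EncodeZigZag(v) // int64 -> uint64 varint payload
		fmt.Printf("%4d -> %3d (varint size %d)\n", v, enc, protowire.SizeVarint(enc))
		if protowire.DecodeZigZag(enc) != v { // round-trips exactly
			panic("zig-zag round trip failed")
		}
	}
}
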
// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64.
-func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -2315,7 +2315,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Int64Ptr()
if *vp == nil {
@@ -2334,7 +2334,7 @@
}
// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64.
-func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int64Slice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
@@ -2343,7 +2343,7 @@
}
// appendSint64Slice encodes a []int64 pointer as a repeated Sint64.
-func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int64Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -2353,13 +2353,13 @@
}
// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64.
-func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -2374,7 +2374,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, protowire.DecodeZigZag(v))
b = b[n:]
@@ -2398,7 +2398,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, protowire.DecodeZigZag(v))
out.n = n
@@ -2413,7 +2413,7 @@
}
// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64.
-func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int64Slice()
if len(s) == 0 {
return 0
@@ -2426,7 +2426,7 @@
}
// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64.
-func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int64Slice()
if len(s) == 0 {
return b, nil
@@ -2451,19 +2451,19 @@
}
// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64.
-func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
}
// appendSint64Value encodes a int64 value as a Sint64.
-func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
return b, nil
}
// consumeSint64Value decodes a int64 value as a Sint64.
-func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -2479,7 +2479,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil
@@ -2493,7 +2493,7 @@
}
// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64.
-func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -2503,7 +2503,7 @@
}
// appendSint64SliceValue encodes a []int64 value as a repeated Sint64.
-func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -2514,12 +2514,12 @@
}
// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64.
-func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -2534,7 +2534,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
b = b[n:]
@@ -2557,7 +2557,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
out.n = n
@@ -2572,7 +2572,7 @@
}
// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64.
-func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -2587,7 +2587,7 @@
}
// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64.
-func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -2615,13 +2615,13 @@
}
// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64.
-func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Uint64()
return f.tagsize + protowire.SizeVarint(v)
}
// appendUint64 wire encodes a uint64 pointer as a Uint64.
-func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint64()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, v)
@@ -2629,7 +2629,7 @@
}
// consumeUint64 wire decodes a uint64 pointer as a Uint64.
-func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -2645,7 +2645,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Uint64() = v
out.n = n
@@ -2661,7 +2661,7 @@
// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64.
// The zero value is not encoded.
-func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Uint64()
if v == 0 {
return 0
@@ -2671,7 +2671,7 @@
// appendUint64NoZero wire encodes a uint64 pointer as a Uint64.
// The zero value is not encoded.
-func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint64()
if v == 0 {
return b, nil
@@ -2690,14 +2690,14 @@
// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64.
// It panics if the pointer is nil.
-func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.Uint64Ptr()
return f.tagsize + protowire.SizeVarint(v)
}
// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64.
// It panics if the pointer is nil.
-func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Uint64Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, v)
@@ -2705,7 +2705,7 @@
}
// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64.
-func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
@@ -2721,7 +2721,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Uint64Ptr()
if *vp == nil {
@@ -2740,7 +2740,7 @@
}
// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64.
-func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint64Slice()
for _, v := range s {
size += f.tagsize + protowire.SizeVarint(v)
@@ -2749,7 +2749,7 @@
}
// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64.
-func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint64Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -2759,13 +2759,13 @@
}
// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64.
-func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -2780,7 +2780,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, v)
b = b[n:]
@@ -2804,7 +2804,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, v)
out.n = n
@@ -2819,7 +2819,7 @@
}
// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64.
-func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint64Slice()
if len(s) == 0 {
return 0
@@ -2832,7 +2832,7 @@
}
// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64.
-func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint64Slice()
if len(s) == 0 {
return b, nil
@@ -2857,19 +2857,19 @@
}
// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64.
-func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeVarint(v.Uint())
}
// appendUint64Value encodes a uint64 value as a Uint64.
-func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, v.Uint())
return b, nil
}
// consumeUint64Value decodes a uint64 value as a Uint64.
-func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return protoreflect.Value{}, out, errUnknown
}
@@ -2885,7 +2885,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfUint64(v), out, nil
@@ -2899,7 +2899,7 @@
}
// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64.
-func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -2909,7 +2909,7 @@
}
// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64.
-func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -2920,12 +2920,12 @@
}
// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64.
-func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
var v uint64
@@ -2940,7 +2940,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint64(v))
b = b[n:]
@@ -2963,7 +2963,7 @@
v, n = protowire.ConsumeVarint(b)
}
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint64(v))
out.n = n
@@ -2978,7 +2978,7 @@
}
// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64.
-func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -2993,7 +2993,7 @@
}
// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64.
-func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3021,13 +3021,13 @@
}
// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32.
-func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed32()
}
// appendSfixed32 wire encodes a int32 pointer as a Sfixed32.
-func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int32()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed32(b, uint32(v))
@@ -3035,13 +3035,13 @@
}
// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32.
-func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Int32() = int32(v)
out.n = n
@@ -3057,7 +3057,7 @@
// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32.
// The zero value is not encoded.
-func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int32()
if v == 0 {
return 0
@@ -3067,7 +3067,7 @@
// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32.
// The zero value is not encoded.
-func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int32()
if v == 0 {
return b, nil
@@ -3086,13 +3086,13 @@
// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32.
// It panics if the pointer is nil.
-func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed32()
}
// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32.
// It panics if the pointer is nil.
-func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Int32Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed32(b, uint32(v))
@@ -3100,13 +3100,13 @@
}
// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32.
-func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Int32Ptr()
if *vp == nil {
@@ -3125,14 +3125,14 @@
}
// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32.
-func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int32Slice()
size = len(s) * (f.tagsize + protowire.SizeFixed32())
return size
}
// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32.
-func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int32Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -3142,18 +3142,18 @@
}
// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32.
-func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, int32(v))
b = b[n:]
@@ -3167,7 +3167,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, int32(v))
out.n = n
@@ -3182,7 +3182,7 @@
}
// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32.
-func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int32Slice()
if len(s) == 0 {
return 0
@@ -3192,7 +3192,7 @@
}
// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32.
-func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int32Slice()
if len(s) == 0 {
return b, nil
@@ -3214,25 +3214,25 @@
}
// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32.
-func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeFixed32()
}
// appendSfixed32Value encodes a int32 value as a Sfixed32.
-func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendFixed32(b, uint32(v.Int()))
return b, nil
}
// consumeSfixed32Value decodes a int32 value as a Sfixed32.
-func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfInt32(int32(v)), out, nil
@@ -3246,14 +3246,14 @@
}
// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32.
-func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
size = list.Len() * (tagsize + protowire.SizeFixed32())
return size
}
// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32.
-func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -3264,17 +3264,17 @@
}
// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32.
-func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(v)))
b = b[n:]
@@ -3287,7 +3287,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(v)))
out.n = n
@@ -3302,7 +3302,7 @@
}
// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32.
-func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3313,7 +3313,7 @@
}
// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32.
-func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3337,13 +3337,13 @@
}
// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32.
-func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed32()
}
// appendFixed32 wire encodes a uint32 pointer as a Fixed32.
-func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint32()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed32(b, v)
@@ -3351,13 +3351,13 @@
}
// consumeFixed32 wire decodes a uint32 pointer as a Fixed32.
-func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Uint32() = v
out.n = n
@@ -3373,7 +3373,7 @@
// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32.
// The zero value is not encoded.
-func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Uint32()
if v == 0 {
return 0
@@ -3383,7 +3383,7 @@
// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32.
// The zero value is not encoded.
-func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint32()
if v == 0 {
return b, nil
@@ -3402,13 +3402,13 @@
// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32.
// It panics if the pointer is nil.
-func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed32()
}
// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32.
// It panics if the pointer is nil.
-func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Uint32Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed32(b, v)
@@ -3416,13 +3416,13 @@
}
// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32.
-func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Uint32Ptr()
if *vp == nil {
@@ -3441,14 +3441,14 @@
}
// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32.
-func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint32Slice()
size = len(s) * (f.tagsize + protowire.SizeFixed32())
return size
}
// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32.
-func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint32Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -3458,18 +3458,18 @@
}
// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32.
-func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, v)
b = b[n:]
@@ -3483,7 +3483,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, v)
out.n = n
@@ -3498,7 +3498,7 @@
}
// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32.
-func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint32Slice()
if len(s) == 0 {
return 0
@@ -3508,7 +3508,7 @@
}
// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32.
-func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint32Slice()
if len(s) == 0 {
return b, nil
@@ -3530,25 +3530,25 @@
}
// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32.
-func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeFixed32()
}
// appendFixed32Value encodes a uint32 value as a Fixed32.
-func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendFixed32(b, uint32(v.Uint()))
return b, nil
}
// consumeFixed32Value decodes a uint32 value as a Fixed32.
-func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfUint32(uint32(v)), out, nil
@@ -3562,14 +3562,14 @@
}
// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32.
-func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
size = list.Len() * (tagsize + protowire.SizeFixed32())
return size
}
// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32.
-func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -3580,17 +3580,17 @@
}
// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32.
-func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint32(uint32(v)))
b = b[n:]
@@ -3603,7 +3603,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint32(uint32(v)))
out.n = n
@@ -3618,7 +3618,7 @@
}
// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32.
-func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3629,7 +3629,7 @@
}
// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32.
-func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3653,13 +3653,13 @@
}
// sizeFloat returns the size of wire encoding a float32 pointer as a Float.
-func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed32()
}
// appendFloat wire encodes a float32 pointer as a Float.
-func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Float32()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed32(b, math.Float32bits(v))
@@ -3667,13 +3667,13 @@
}
// consumeFloat wire decodes a float32 pointer as a Float.
-func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Float32() = math.Float32frombits(v)
out.n = n
@@ -3689,7 +3689,7 @@
// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float.
// The zero value is not encoded.
-func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Float32()
if v == 0 && !math.Signbit(float64(v)) {
return 0
@@ -3699,7 +3699,7 @@
// appendFloatNoZero wire encodes a float32 pointer as a Float.
// The zero value is not encoded.
-func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Float32()
if v == 0 && !math.Signbit(float64(v)) {
return b, nil
@@ -3718,13 +3718,13 @@
// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float.
// It panics if the pointer is nil.
-func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed32()
}
// appendFloatPtr wire encodes a *float32 pointer as a Float.
// It panics if the pointer is nil.
-func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Float32Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed32(b, math.Float32bits(v))
@@ -3732,13 +3732,13 @@
}
// consumeFloatPtr wire decodes a *float32 pointer as a Float.
-func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Float32Ptr()
if *vp == nil {
@@ -3757,14 +3757,14 @@
}
// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float.
-func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Float32Slice()
size = len(s) * (f.tagsize + protowire.SizeFixed32())
return size
}
// appendFloatSlice encodes a []float32 pointer as a repeated Float.
-func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Float32Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -3774,18 +3774,18 @@
}
// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float.
-func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, math.Float32frombits(v))
b = b[n:]
@@ -3799,7 +3799,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, math.Float32frombits(v))
out.n = n
@@ -3814,7 +3814,7 @@
}
// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float.
-func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Float32Slice()
if len(s) == 0 {
return 0
@@ -3824,7 +3824,7 @@
}
// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float.
-func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Float32Slice()
if len(s) == 0 {
return b, nil
@@ -3846,25 +3846,25 @@
}
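
// Illustrative sketch — not part of the vendored file or of this patch. Float
// fields are laid out exactly like Fixed32 fields — four little-endian bytes —
// with the bit pattern supplied by math.Float32bits, as the appendFloat*
// coders above show.
package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := protowire.AppendFixed32(nil, math.Float32bits(1.5))
	fmt.Printf("% x\n", b) // 00 00 c0 3f

	bits, n := protowire.ConsumeFixed32(b)
	if n < 0 {
		panic(protowire.ParseError(n))
	}
	fmt.Println(math.Float32frombits(bits)) // 1.5
}
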
// sizeFloatValue returns the size of wire encoding a float32 value as a Float.
-func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeFixed32()
}
// appendFloatValue encodes a float32 value as a Float.
-func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
return b, nil
}
// consumeFloatValue decodes a float32 value as a Float.
-func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.Fixed32Type {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil
@@ -3878,14 +3878,14 @@
}
// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float.
-func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
size = list.Len() * (tagsize + protowire.SizeFixed32())
return size
}
// appendFloatSliceValue encodes a []float32 value as a repeated Float.
-func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -3896,17 +3896,17 @@
}
// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float.
-func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
b = b[n:]
@@ -3919,7 +3919,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
out.n = n
@@ -3934,7 +3934,7 @@
}
// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float.
-func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3945,7 +3945,7 @@
}
// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float.
-func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -3969,13 +3969,13 @@
}
// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64.
-func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed64()
}
// appendSfixed64 wire encodes a int64 pointer as a Sfixed64.
-func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int64()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed64(b, uint64(v))
@@ -3983,13 +3983,13 @@
}
// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64.
-func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Int64() = int64(v)
out.n = n
@@ -4005,7 +4005,7 @@
// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64.
// The zero value is not encoded.
-func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Int64()
if v == 0 {
return 0
@@ -4015,7 +4015,7 @@
// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64.
// The zero value is not encoded.
-func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Int64()
if v == 0 {
return b, nil
@@ -4034,13 +4034,13 @@
// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64.
// It panics if the pointer is nil.
-func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed64()
}
// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64.
// It panics if the pointer is nil.
-func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Int64Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed64(b, uint64(v))
@@ -4048,13 +4048,13 @@
}
// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64.
-func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Int64Ptr()
if *vp == nil {
@@ -4073,14 +4073,14 @@
}
// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64.
-func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int64Slice()
size = len(s) * (f.tagsize + protowire.SizeFixed64())
return size
}
// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64.
-func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int64Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -4090,18 +4090,18 @@
}
// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64.
-func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, int64(v))
b = b[n:]
@@ -4115,7 +4115,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, int64(v))
out.n = n
@@ -4130,7 +4130,7 @@
}
// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64.
-func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Int64Slice()
if len(s) == 0 {
return 0
@@ -4140,7 +4140,7 @@
}
// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64.
-func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Int64Slice()
if len(s) == 0 {
return b, nil
@@ -4162,25 +4162,25 @@
}
// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64.
-func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeFixed64()
}
// appendSfixed64Value encodes a int64 value as a Sfixed64.
-func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendFixed64(b, uint64(v.Int()))
return b, nil
}
// consumeSfixed64Value decodes a int64 value as a Sfixed64.
-func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfInt64(int64(v)), out, nil
@@ -4194,14 +4194,14 @@
}
// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64.
-func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
size = list.Len() * (tagsize + protowire.SizeFixed64())
return size
}
// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64.
-func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -4212,17 +4212,17 @@
}
// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64.
-func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt64(int64(v)))
b = b[n:]
@@ -4235,7 +4235,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfInt64(int64(v)))
out.n = n
@@ -4250,7 +4250,7 @@
}
// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64.
-func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -4261,7 +4261,7 @@
}
// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64.
-func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -4285,13 +4285,13 @@
}
// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64.
-func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed64()
}
// appendFixed64 wire encodes a uint64 pointer as a Fixed64.
-func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint64()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed64(b, v)
@@ -4299,13 +4299,13 @@
}
// consumeFixed64 wire decodes a uint64 pointer as a Fixed64.
-func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Uint64() = v
out.n = n
@@ -4321,7 +4321,7 @@
// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64.
// The zero value is not encoded.
-func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Uint64()
if v == 0 {
return 0
@@ -4331,7 +4331,7 @@
// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64.
// The zero value is not encoded.
-func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Uint64()
if v == 0 {
return b, nil
@@ -4350,13 +4350,13 @@
// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64.
// It panics if the pointer is nil.
-func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed64()
}
// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64.
// It panics if the pointer is nil.
-func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Uint64Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed64(b, v)
@@ -4364,13 +4364,13 @@
}
// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64.
-func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Uint64Ptr()
if *vp == nil {
@@ -4389,14 +4389,14 @@
}
// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64.
-func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint64Slice()
size = len(s) * (f.tagsize + protowire.SizeFixed64())
return size
}
// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64.
-func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint64Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -4406,18 +4406,18 @@
}
// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64.
-func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, v)
b = b[n:]
@@ -4431,7 +4431,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, v)
out.n = n
@@ -4446,7 +4446,7 @@
}
// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64.
-func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Uint64Slice()
if len(s) == 0 {
return 0
@@ -4456,7 +4456,7 @@
}
// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64.
-func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Uint64Slice()
if len(s) == 0 {
return b, nil
@@ -4478,25 +4478,25 @@
}
// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64.
-func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeFixed64()
}
// appendFixed64Value encodes a uint64 value as a Fixed64.
-func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendFixed64(b, v.Uint())
return b, nil
}
// consumeFixed64Value decodes a uint64 value as a Fixed64.
-func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfUint64(v), out, nil
@@ -4510,14 +4510,14 @@
}
// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64.
-func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
size = list.Len() * (tagsize + protowire.SizeFixed64())
return size
}
// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64.
-func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -4528,17 +4528,17 @@
}
// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64.
-func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint64(v))
b = b[n:]
@@ -4551,7 +4551,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfUint64(v))
out.n = n
@@ -4566,7 +4566,7 @@
}
// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64.
-func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -4577,7 +4577,7 @@
}
// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64.
-func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -4601,13 +4601,13 @@
}
// sizeDouble returns the size of wire encoding a float64 pointer as a Double.
-func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed64()
}
// appendDouble wire encodes a float64 pointer as a Double.
-func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Float64()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed64(b, math.Float64bits(v))
@@ -4615,13 +4615,13 @@
}
// consumeDouble wire decodes a float64 pointer as a Double.
-func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Float64() = math.Float64frombits(v)
out.n = n
@@ -4637,7 +4637,7 @@
// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double.
// The zero value is not encoded.
-func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Float64()
if v == 0 && !math.Signbit(float64(v)) {
return 0
@@ -4647,7 +4647,7 @@
// appendDoubleNoZero wire encodes a float64 pointer as a Double.
// The zero value is not encoded.
-func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Float64()
if v == 0 && !math.Signbit(float64(v)) {
return b, nil
@@ -4666,13 +4666,13 @@
// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double.
// It panics if the pointer is nil.
-func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return f.tagsize + protowire.SizeFixed64()
}
// appendDoublePtr wire encodes a *float64 pointer as a Double.
// It panics if the pointer is nil.
-func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.Float64Ptr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendFixed64(b, math.Float64bits(v))
@@ -4680,13 +4680,13 @@
}
// consumeDoublePtr wire decodes a *float64 pointer as a Double.
-func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.Float64Ptr()
if *vp == nil {
@@ -4705,14 +4705,14 @@
}
// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double.
-func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Float64Slice()
size = len(s) * (f.tagsize + protowire.SizeFixed64())
return size
}
// appendDoubleSlice encodes a []float64 pointer as a repeated Double.
-func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Float64Slice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -4722,18 +4722,18 @@
}
// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double.
-func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
s = append(s, math.Float64frombits(v))
b = b[n:]
@@ -4747,7 +4747,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, math.Float64frombits(v))
out.n = n
@@ -4762,7 +4762,7 @@
}
// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double.
-func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.Float64Slice()
if len(s) == 0 {
return 0
@@ -4772,7 +4772,7 @@
}
// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double.
-func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.Float64Slice()
if len(s) == 0 {
return b, nil
@@ -4794,25 +4794,25 @@
}
// sizeDoubleValue returns the size of wire encoding a float64 value as a Double.
-func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeFixed64()
}
// appendDoubleValue encodes a float64 value as a Double.
-func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
return b, nil
}
// consumeDoubleValue decodes a float64 value as a Double.
-func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.Fixed64Type {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil
@@ -4826,14 +4826,14 @@
}
// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double.
-func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
size = list.Len() * (tagsize + protowire.SizeFixed64())
return size
}
// appendDoubleSliceValue encodes a []float64 value as a repeated Double.
-func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -4844,17 +4844,17 @@
}
// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double.
-func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
b = b[n:]
@@ -4867,7 +4867,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
out.n = n
@@ -4882,7 +4882,7 @@
}
// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double.
-func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -4893,7 +4893,7 @@
}
// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double.
-func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
llen := list.Len()
if llen == 0 {
@@ -4917,13 +4917,13 @@
}
// sizeString returns the size of wire encoding a string pointer as a String.
-func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.String()
return f.tagsize + protowire.SizeBytes(len(v))
}
// appendString wire encodes a string pointer as a String.
-func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.String()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendString(b, v)
@@ -4931,15 +4931,15 @@
}
// consumeString wire decodes a string pointer as a String.
-func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
- *p.String() = v
+ *p.String() = string(v)
out.n = n
return out, nil
}
@@ -4952,7 +4952,7 @@
}
// appendStringValidateUTF8 wire encodes a string pointer as a String.
-func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.String()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendString(b, v)
@@ -4963,18 +4963,18 @@
}
// consumeStringValidateUTF8 wire decodes a string pointer as a String.
-func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
- if !utf8.ValidString(v) {
+ if !utf8.Valid(v) {
return out, errInvalidUTF8{}
}
- *p.String() = v
+ *p.String() = string(v)
out.n = n
return out, nil
}
@@ -4988,7 +4988,7 @@
// sizeStringNoZero returns the size of wire encoding a string pointer as a String.
// The zero value is not encoded.
-func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.String()
if len(v) == 0 {
return 0
@@ -4998,7 +4998,7 @@
// appendStringNoZero wire encodes a string pointer as a String.
// The zero value is not encoded.
-func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.String()
if len(v) == 0 {
return b, nil
@@ -5017,7 +5017,7 @@
// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String.
// The zero value is not encoded.
-func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.String()
if len(v) == 0 {
return b, nil
@@ -5039,14 +5039,14 @@
// sizeStringPtr returns the size of wire encoding a *string pointer as a String.
// It panics if the pointer is nil.
-func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := **p.StringPtr()
return f.tagsize + protowire.SizeBytes(len(v))
}
// appendStringPtr wire encodes a *string pointer as a String.
// It panics if the pointer is nil.
-func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := **p.StringPtr()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendString(b, v)
@@ -5054,19 +5054,19 @@
}
// consumeStringPtr wire decodes a *string pointer as a String.
-func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
vp := p.StringPtr()
if *vp == nil {
*vp = new(string)
}
- **vp = v
+ **vp = string(v)
out.n = n
return out, nil
}
@@ -5078,8 +5078,48 @@
merge: mergeStringPtr,
}
+// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String.
+func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = string(v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtrValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtrValidateUTF8,
+ unmarshal: consumeStringPtrValidateUTF8,
+ merge: mergeStringPtr,
+}
+
// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String.
-func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.StringSlice()
for _, v := range s {
size += f.tagsize + protowire.SizeBytes(len(v))
@@ -5088,7 +5128,7 @@
}
// appendStringSlice encodes a []string pointer as a repeated String.
-func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.StringSlice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -5098,16 +5138,16 @@
}
// consumeStringSlice wire decodes a []string pointer as a repeated String.
-func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.StringSlice()
if wtyp != protowire.BytesType {
return out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
- *sp = append(*sp, v)
+ *sp = append(*sp, string(v))
out.n = n
return out, nil
}
@@ -5120,7 +5160,7 @@
}
// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String.
-func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.StringSlice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -5133,19 +5173,19 @@
}
// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String.
-func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- sp := p.StringSlice()
+func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
- if !utf8.ValidString(v) {
+ if !utf8.Valid(v) {
return out, errInvalidUTF8{}
}
- *sp = append(*sp, v)
+ sp := p.StringSlice()
+ *sp = append(*sp, string(v))
out.n = n
return out, nil
}
@@ -5158,25 +5198,25 @@
}
// sizeStringValue returns the size of wire encoding a string value as a String.
-func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeBytes(len(v.String()))
}
// appendStringValue encodes a string value as a String.
-func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendString(b, v.String())
return b, nil
}
// consumeStringValue decodes a string value as a String.
-func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return protoreflect.Value{}, out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfString(string(v)), out, nil
@@ -5190,7 +5230,7 @@
}
// appendStringValueValidateUTF8 encodes a string value as a String.
-func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendString(b, v.String())
if !utf8.ValidString(v.String()) {
@@ -5200,15 +5240,15 @@
}
// consumeStringValueValidateUTF8 decodes a string value as a String.
-func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return protoreflect.Value{}, out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
- if !utf8.ValidString(v) {
+ if !utf8.Valid(v) {
return protoreflect.Value{}, out, errInvalidUTF8{}
}
out.n = n
@@ -5223,7 +5263,7 @@
}
// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String.
-func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -5233,7 +5273,7 @@
}
// appendStringSliceValue encodes a []string value as a repeated String.
-func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -5244,14 +5284,14 @@
}
// consumeStringSliceValue wire decodes a []string value as a repeated String.
-func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp != protowire.BytesType {
return protoreflect.Value{}, out, errUnknown
}
- v, n := protowire.ConsumeString(b)
+ v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfString(string(v)))
out.n = n
@@ -5266,13 +5306,13 @@
}
// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes.
-func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Bytes()
return f.tagsize + protowire.SizeBytes(len(v))
}
// appendBytes wire encodes a []byte pointer as a Bytes.
-func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Bytes()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendBytes(b, v)
@@ -5280,13 +5320,13 @@
}
// consumeBytes wire decodes a []byte pointer as a Bytes.
-func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Bytes() = append(emptyBuf[:], v...)
out.n = n
@@ -5301,7 +5341,7 @@
}
// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes.
-func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Bytes()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendBytes(b, v)
@@ -5312,13 +5352,13 @@
}
// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes.
-func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if !utf8.Valid(v) {
return out, errInvalidUTF8{}
@@ -5337,7 +5377,7 @@
// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes.
// The zero value is not encoded.
-func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
v := *p.Bytes()
if len(v) == 0 {
return 0
@@ -5347,7 +5387,7 @@
// appendBytesNoZero wire encodes a []byte pointer as a Bytes.
// The zero value is not encoded.
-func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Bytes()
if len(v) == 0 {
return b, nil
@@ -5359,13 +5399,13 @@
// consumeBytesNoZero wire decodes a []byte pointer as a Bytes.
// The zero value is not decoded.
-func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*p.Bytes() = append(([]byte)(nil), v...)
out.n = n
@@ -5381,7 +5421,7 @@
// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes.
// The zero value is not encoded.
-func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := *p.Bytes()
if len(v) == 0 {
return b, nil
@@ -5395,13 +5435,13 @@
}
// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes.
-func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if !utf8.Valid(v) {
return out, errInvalidUTF8{}
@@ -5419,7 +5459,7 @@
}
// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes.
-func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := *p.BytesSlice()
for _, v := range s {
size += f.tagsize + protowire.SizeBytes(len(v))
@@ -5428,7 +5468,7 @@
}
// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes.
-func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.BytesSlice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -5438,14 +5478,14 @@
}
// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes.
-func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.BytesSlice()
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
*sp = append(*sp, append(emptyBuf[:], v...))
out.n = n
@@ -5460,7 +5500,7 @@
}
// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes.
-func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := *p.BytesSlice()
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
@@ -5473,18 +5513,18 @@
}
// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes.
-func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- sp := p.BytesSlice()
+func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if !utf8.Valid(v) {
return out, errInvalidUTF8{}
}
+ sp := p.BytesSlice()
*sp = append(*sp, append(emptyBuf[:], v...))
out.n = n
return out, nil
@@ -5498,25 +5538,25 @@
}
// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes.
-func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
return tagsize + protowire.SizeBytes(len(v.Bytes()))
}
// appendBytesValue encodes a []byte value as a Bytes.
-func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendBytes(b, v.Bytes())
return b, nil
}
// consumeBytesValue decodes a []byte value as a Bytes.
-func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
out.n = n
return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil
@@ -5530,7 +5570,7 @@
}
// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes.
-func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -5540,7 +5580,7 @@
}
// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes.
-func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
v := list.Get(i)
@@ -5551,14 +5591,14 @@
}
// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes.
-func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp != protowire.BytesType {
return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return protoreflect.Value{}, out, protowire.ParseError(n)
+ return protoreflect.Value{}, out, errDecode
}
list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
out.n = n
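
Throughout the hunks above for the per-kind coder functions, three mechanical changes repeat: the blank _ option parameters are renamed to opts, every per-call protowire.ParseError(n) is replaced by the package's shared errDecode sentinel, and string fields are read with protowire.ConsumeBytes plus an explicit string(v) conversion (running utf8.Valid on the raw bytes) instead of protowire.ConsumeString. The standalone sketch below shows that decode shape in isolation; errMalformed and decodeStringField are illustrative names, not identifiers from the vendored package.

package main

import (
	"errors"
	"fmt"
	"unicode/utf8"

	"google.golang.org/protobuf/encoding/protowire"
)

// errMalformed plays the role of the internal errDecode sentinel used above:
// one shared error value returned wherever a consume reports a negative length.
var errMalformed = errors.New("cannot parse invalid wire-format data")

// decodeStringField consumes one length-delimited string field, validating
// UTF-8 on the raw bytes before converting them, as the hunks above now do.
func decodeStringField(b []byte) (string, int, error) {
	v, n := protowire.ConsumeBytes(b)
	if n < 0 {
		return "", 0, errMalformed
	}
	if !utf8.Valid(v) {
		return "", 0, errors.New("invalid UTF-8")
	}
	return string(v), n, nil
}

func main() {
	b := protowire.AppendBytes(nil, []byte("hello"))
	s, n, err := decodeStringField(b)
	fmt.Println(s, n, err) // hello 6 <nil>
}
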
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 35a67c2..c1245fe 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -5,11 +5,11 @@
package impl
import (
- "errors"
"reflect"
"sort"
"google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/genid"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
@@ -117,7 +117,7 @@
}
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
var (
key = mapi.keyZero
@@ -126,15 +126,15 @@
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if num > protowire.MaxValidNumber {
- return out, errors.New("invalid field number")
+ return out, errDecode
}
b = b[n:]
err := errUnknown
switch num {
- case 1:
+ case genid.MapEntry_Key_field_number:
var v pref.Value
var o unmarshalOutput
v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
@@ -143,7 +143,7 @@
}
key = v
n = o.n
- case 2:
+ case genid.MapEntry_Value_field_number:
var v pref.Value
var o unmarshalOutput
v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts)
@@ -156,7 +156,7 @@
if err == errUnknown {
n = protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
} else if err != nil {
return out, err
@@ -174,7 +174,7 @@
}
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
var (
key = mapi.keyZero
@@ -183,10 +183,10 @@
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if num > protowire.MaxValidNumber {
- return out, errors.New("invalid field number")
+ return out, errDecode
}
b = b[n:]
err := errUnknown
@@ -207,7 +207,7 @@
var v []byte
v, n = protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
var o unmarshalOutput
o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts)
@@ -220,7 +220,7 @@
if err == errUnknown {
n = protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
} else if err != nil {
return out, err
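
The codec_map.go hunks above replace the literal map-entry field numbers 1 and 2 with the named constants genid.MapEntry_Key_field_number and genid.MapEntry_Value_field_number, and route malformed input through the same errDecode sentinel. On the wire, each entry of a protobuf map field is an embedded message whose key lives in field 1 and whose value lives in field 2. The sketch below builds and walks one such entry with the public protowire package; it is illustrative only and does not use the internal decoder.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode one entry of a map<string, int32>: key "id" in field 1,
	// value 7 in field 2.
	var entry []byte
	entry = protowire.AppendTag(entry, 1, protowire.BytesType)
	entry = protowire.AppendString(entry, "id")
	entry = protowire.AppendTag(entry, 2, protowire.VarintType)
	entry = protowire.AppendVarint(entry, 7)

	// Walk the entry tag by tag, mirroring the shape of the map decoder above.
	// Error checks on the inner consumes are omitted because the entry built
	// above is known to be well-formed.
	for b := entry; len(b) > 0; {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			panic("malformed tag")
		}
		b = b[n:]
		switch {
		case num == 1 && typ == protowire.BytesType:
			v, m := protowire.ConsumeBytes(b)
			fmt.Println("key:", string(v))
			b = b[m:]
		case num == 2 && typ == protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			fmt.Println("value:", v)
			b = b[m:]
		default:
			b = b[protowire.ConsumeFieldValue(num, typ, b):]
		}
	}
}
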
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 370ec65..cd40527 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -11,7 +11,7 @@
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
- "google.golang.org/protobuf/internal/fieldsort"
+ "google.golang.org/protobuf/internal/order"
pref "google.golang.org/protobuf/reflect/protoreflect"
piface "google.golang.org/protobuf/runtime/protoiface"
)
@@ -27,6 +27,7 @@
coderFields map[protowire.Number]*coderFieldInfo
sizecacheOffset offset
unknownOffset offset
+ unknownPtrKind bool
extensionOffset offset
needsInitCheck bool
isMessageSet bool
@@ -47,17 +48,30 @@
}
func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
- mi.sizecacheOffset = si.sizecacheOffset
- mi.unknownOffset = si.unknownOffset
- mi.extensionOffset = si.extensionOffset
+ mi.sizecacheOffset = invalidOffset
+ mi.unknownOffset = invalidOffset
+ mi.extensionOffset = invalidOffset
+
+ if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
+ mi.sizecacheOffset = si.sizecacheOffset
+ }
+ if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) {
+ mi.unknownOffset = si.unknownOffset
+ mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+ }
+ if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType {
+ mi.extensionOffset = si.extensionOffset
+ }
mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
fields := mi.Desc.Fields()
+ preallocFields := make([]coderFieldInfo, fields.Len())
for i := 0; i < fields.Len(); i++ {
fd := fields.Get(i)
fs := si.fieldsByNumber[fd.Number()]
- if fd.ContainingOneof() != nil {
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
fs = si.oneofsByName[fd.ContainingOneof().Name()]
}
ft := fs.Type
@@ -71,7 +85,28 @@
var funcs pointerCoderFuncs
var childMessage *MessageInfo
switch {
- case fd.ContainingOneof() != nil:
+ case ft == nil:
+ // This never occurs for generated message types.
+ // It implies that a hand-crafted type has missing Go fields
+ // for specific protobuf message fields.
+ funcs = pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return 0
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return nil, nil
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ }
+ case isOneof:
fieldOffset = offsetOf(fs, mi.Exporter)
case fd.IsWeak():
fieldOffset = si.weakOffset
@@ -80,7 +115,8 @@
fieldOffset = offsetOf(fs, mi.Exporter)
childMessage, funcs = fieldCoder(fd, ft)
}
- cf := &coderFieldInfo{
+ cf := &preallocFields[i]
+ *cf = coderFieldInfo{
num: fd.Number(),
offset: fieldOffset,
wiretag: wiretag,
@@ -89,17 +125,16 @@
funcs: funcs,
mi: childMessage,
validation: newFieldValidationInfo(mi, si, fd, ft),
- isPointer: (fd.Cardinality() == pref.Repeated ||
- fd.Kind() == pref.MessageKind ||
- fd.Kind() == pref.GroupKind ||
- fd.Syntax() != pref.Proto3),
+ isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(),
isRequired: fd.Cardinality() == pref.Required,
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
}
for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
- mi.initOneofFieldCoders(oneofs.Get(i), si)
+ if od := oneofs.Get(i); !od.IsSynthetic() {
+ mi.initOneofFieldCoders(od, si)
+ }
}
if messageset.IsMessageSet(mi.Desc) {
if !mi.extensionOffset.IsValid() {
@@ -123,7 +158,7 @@
}
mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
for _, cf := range mi.orderedCoderFields {
- if int(cf.num) > len(mi.denseCoderFields) {
+ if int(cf.num) >= len(mi.denseCoderFields) {
break
}
mi.denseCoderFields[cf.num] = cf
@@ -134,7 +169,7 @@
sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
fi := fields.ByNumber(mi.orderedCoderFields[i].num)
fj := fields.ByNumber(mi.orderedCoderFields[j].num)
- return fieldsort.Less(fi, fj)
+ return order.LegacyFieldOrder(fi, fj)
})
}
@@ -155,3 +190,28 @@
mi.methods.Merge = mi.merge
}
}
+
+// getUnknownBytes returns a *[]byte for the unknown fields.
+// It is the caller's responsibility to check whether the pointer is nil.
+// This function is specially designed to be inlineable.
+func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte {
+ if mi.unknownPtrKind {
+ return *p.Apply(mi.unknownOffset).BytesPtr()
+ } else {
+ return p.Apply(mi.unknownOffset).Bytes()
+ }
+}
+
+// mutableUnknownBytes returns a *[]byte for the unknown fields.
+// The returned pointer is guaranteed to not be nil.
+func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte {
+ if mi.unknownPtrKind {
+ bp := p.Apply(mi.unknownOffset).BytesPtr()
+ if *bp == nil {
+ *bp = new([]byte)
+ }
+ return *bp
+ } else {
+ return p.Apply(mi.unknownOffset).Bytes()
+ }
+}
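
The new getUnknownBytes/mutableUnknownBytes helpers abstract over the two storage layouts the runtime now accepts for unknown fields: a plain []byte struct field (unknownFieldsA) or a *[]byte field (unknownFieldsB); the pointer form keeps the message struct one word smaller at the cost of an extra allocation when unknown data is actually present. From user code this is only observable through the public API. A minimal sketch, using google.protobuf.Empty as a convenient message with no declared fields:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/emptypb"
    )

    func main() {
        // Build a wire-format payload carrying field number 1 as a varint,
        // which google.protobuf.Empty does not declare.
        var raw []byte
        raw = protowire.AppendTag(raw, 1, protowire.VarintType)
        raw = protowire.AppendVarint(raw, 42)

        m := &emptypb.Empty{}
        if err := proto.Unmarshal(raw, m); err != nil {
            panic(err)
        }
        // The unrecognized field is retained (via the unknown-bytes storage
        // above) and round-trips on re-marshal.
        fmt.Printf("unknown: %x\n", m.ProtoReflect().GetUnknown())
    }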
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index cfb68e1..b7a23fa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -29,8 +29,9 @@
size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
}
- unknown := *p.Apply(mi.unknownOffset).Bytes()
- size += messageset.SizeUnknown(unknown)
+ if u := mi.getUnknownBytes(p); u != nil {
+ size += messageset.SizeUnknown(*u)
+ }
return size
}
@@ -69,10 +70,12 @@
}
}
- unknown := *p.Apply(mi.unknownOffset).Bytes()
- b, err := messageset.AppendUnknown(b, unknown)
- if err != nil {
- return b, err
+ if u := mi.getUnknownBytes(p); u != nil {
+ var err error
+ b, err = messageset.AppendUnknown(b, *u)
+ if err != nil {
+ return b, err
+ }
}
return b, nil
@@ -100,13 +103,13 @@
*ep = make(map[int32]ExtensionField)
}
ext := *ep
- unknown := p.Apply(mi.unknownOffset).Bytes()
initialized := true
err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error {
o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts)
if err == errUnknown {
- *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType)
- *unknown = append(*unknown, v...)
+ u := mi.mutableUnknownBytes(p)
+ *u = protowire.AppendTag(*u, num, protowire.BytesType)
+ *u = append(*u, v...)
return nil
}
if !o.initialized {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
index 86f7dc3..90705e3 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
@@ -30,7 +30,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
p.v.Elem().SetInt(int64(v))
out.n = n
@@ -130,12 +130,12 @@
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
@@ -150,7 +150,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
index c934c8d..e899712 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
@@ -338,6 +338,9 @@
return nil, coderDoublePtr
}
case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringPtrValidateUTF8
+ }
if ft.Kind() == reflect.String {
return nil, coderStringPtr
}
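
The added coderStringPtrValidateUTF8 case extends UTF-8 enforcement to pointer-typed string fields (as produced for fields with explicit presence). The externally visible behavior is the same as for ordinary proto3 string fields, sketched here with the public API and google.protobuf.StringValue:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        // StringValue's proto3 string field must hold valid UTF-8, so the
        // coder rejects an invalid byte sequence at marshal time.
        m := wrapperspb.String(string([]byte{0xff, 0xfe}))
        if _, err := proto.Marshal(m); err != nil {
            fmt.Println("marshal rejected invalid UTF-8:", err)
        }
    }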
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 9fc384a..acd61bb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -162,7 +162,7 @@
return ok
}
func (c *boolConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *boolConverter) New() pref.Value { return c.def }
func (c *boolConverter) Zero() pref.Value { return c.def }
@@ -186,7 +186,7 @@
return ok
}
func (c *int32Converter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *int32Converter) New() pref.Value { return c.def }
func (c *int32Converter) Zero() pref.Value { return c.def }
@@ -210,7 +210,7 @@
return ok
}
func (c *int64Converter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *int64Converter) New() pref.Value { return c.def }
func (c *int64Converter) Zero() pref.Value { return c.def }
@@ -234,7 +234,7 @@
return ok
}
func (c *uint32Converter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *uint32Converter) New() pref.Value { return c.def }
func (c *uint32Converter) Zero() pref.Value { return c.def }
@@ -258,7 +258,7 @@
return ok
}
func (c *uint64Converter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *uint64Converter) New() pref.Value { return c.def }
func (c *uint64Converter) Zero() pref.Value { return c.def }
@@ -282,7 +282,7 @@
return ok
}
func (c *float32Converter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *float32Converter) New() pref.Value { return c.def }
func (c *float32Converter) Zero() pref.Value { return c.def }
@@ -306,7 +306,7 @@
return ok
}
func (c *float64Converter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *float64Converter) New() pref.Value { return c.def }
func (c *float64Converter) Zero() pref.Value { return c.def }
@@ -336,7 +336,7 @@
return ok
}
func (c *stringConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *stringConverter) New() pref.Value { return c.def }
func (c *stringConverter) Zero() pref.Value { return c.def }
@@ -363,7 +363,7 @@
return ok
}
func (c *bytesConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *bytesConverter) New() pref.Value { return c.def }
func (c *bytesConverter) Zero() pref.Value { return c.def }
@@ -400,7 +400,7 @@
}
func (c *enumConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *enumConverter) New() pref.Value {
@@ -423,6 +423,13 @@
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
+ if c.isNonPointer() {
+ if v.CanAddr() {
+ v = v.Addr() // T => *T
+ } else {
+ v = reflect.Zero(reflect.PtrTo(v.Type()))
+ }
+ }
if m, ok := v.Interface().(pref.ProtoMessage); ok {
return pref.ValueOfMessage(m.ProtoReflect())
}
@@ -437,6 +444,16 @@
} else {
rv = reflect.ValueOf(m.Interface())
}
+ if c.isNonPointer() {
+ if rv.Type() != reflect.PtrTo(c.goType) {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType)))
+ }
+ if !rv.IsNil() {
+ rv = rv.Elem() // *T => T
+ } else {
+ rv = reflect.Zero(rv.Type().Elem())
+ }
+ }
if rv.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType))
}
@@ -451,17 +468,29 @@
} else {
rv = reflect.ValueOf(m.Interface())
}
+ if c.isNonPointer() {
+ return rv.Type() == reflect.PtrTo(c.goType)
+ }
return rv.Type() == c.goType
}
func (c *messageConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *messageConverter) New() pref.Value {
+ if c.isNonPointer() {
+ return c.PBValueOf(reflect.New(c.goType).Elem())
+ }
return c.PBValueOf(reflect.New(c.goType.Elem()))
}
func (c *messageConverter) Zero() pref.Value {
return c.PBValueOf(reflect.Zero(c.goType))
}
+
+// isNonPointer reports whether the type is a non-pointer type.
+// This never occurs for generated message types.
+func (c *messageConverter) isNonPointer() bool {
+ return c.goType.Kind() != reflect.Ptr
+}
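
With isNonPointer, messageConverter now tolerates hand-crafted message types stored by value rather than behind a pointer: PBValueOf addresses an addressable T to obtain *T and otherwise falls back to a typed nil pointer. A standalone illustration of that reflect step, where handCrafted is a hypothetical stand-in rather than a real message type:

    package main

    import (
        "fmt"
        "reflect"
    )

    // handCrafted stands in for a non-pointer message representation.
    type handCrafted struct{ X int32 }

    func main() {
        // An addressable T is addressed to obtain *T ...
        v := reflect.ValueOf(&handCrafted{X: 7}).Elem()
        fmt.Println(v.CanAddr(), v.Addr().Type()) // true *main.handCrafted

        // ... while a non-addressable T falls back to a typed nil *T.
        w := reflect.ValueOf(handCrafted{X: 7})
        fmt.Println(w.CanAddr(), reflect.Zero(reflect.PtrTo(w.Type())).IsNil()) // false true
    }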
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index fe9384a..6fccab5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -22,7 +22,7 @@
}
type listConverter struct {
- goType reflect.Type
+ goType reflect.Type // []T
c Converter
}
@@ -48,11 +48,11 @@
if !ok {
return false
}
- return list.v.Type().Elem() == c.goType && list.IsValid()
+ return list.v.Type().Elem() == c.goType
}
func (c *listConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *listConverter) New() pref.Value {
@@ -64,7 +64,7 @@
}
type listPtrConverter struct {
- goType reflect.Type
+ goType reflect.Type // *[]T
c Converter
}
@@ -88,7 +88,7 @@
}
func (c *listPtrConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *listPtrConverter) New() pref.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index 3ef36d3..de06b25 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -12,7 +12,7 @@
)
type mapConverter struct {
- goType reflect.Type
+ goType reflect.Type // map[K]V
keyConv, valConv Converter
}
@@ -43,11 +43,11 @@
if !ok {
return false
}
- return mapv.v.Type() == c.goType && mapv.IsValid()
+ return mapv.v.Type() == c.goType
}
func (c *mapConverter) IsValidGo(v reflect.Value) bool {
- return v.Type() == c.goType
+ return v.IsValid() && v.Type() == c.goType
}
func (c *mapConverter) New() pref.Value {
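
The `v.IsValid() &&` guards added throughout the converters exist because the zero reflect.Value carries no type, and calling Type() on it panics. A small demonstration:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        var v reflect.Value      // the zero Value has no type
        fmt.Println(v.IsValid()) // false
        // v.Type() would panic with
        // "reflect: call of reflect.Value.Type on zero Value",
        // hence the IsValid() check before the type comparison.
    }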
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index 85ba1d3..949dc49 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -17,6 +17,8 @@
piface "google.golang.org/protobuf/runtime/protoiface"
)
+var errDecode = errors.New("cannot parse invalid wire-format data")
+
type unmarshalOptions struct {
flags protoiface.UnmarshalInputFlags
resolver interface {
@@ -100,13 +102,13 @@
var n int
tag, n = protowire.ConsumeVarint(b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
b = b[n:]
}
var num protowire.Number
if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
- return out, errors.New("invalid field number")
+ return out, errDecode
} else {
num = protowire.Number(n)
}
@@ -114,7 +116,7 @@
if wtyp == protowire.EndGroupType {
if num != groupTag {
- return out, errors.New("mismatching end group marker")
+ return out, errDecode
}
groupTag = 0
break
@@ -170,10 +172,10 @@
}
n = protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
- return out, protowire.ParseError(n)
+ return out, errDecode
}
if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
- u := p.Apply(mi.unknownOffset).Bytes()
+ u := mi.mutableUnknownBytes(p)
*u = protowire.AppendTag(*u, num, wtyp)
*u = append(*u, b[:n]...)
}
@@ -181,7 +183,7 @@
b = b[n:]
}
if groupTag != 0 {
- return out, errors.New("missing end group marker")
+ return out, errDecode
}
if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
initialized = false
@@ -221,7 +223,7 @@
return out, nil
}
case ValidationInvalid:
- return out, errors.New("invalid wire format")
+ return out, errDecode
case ValidationUnknown:
}
}
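
All wire-format parse failures in this path now collapse into the single errDecode sentinel ("cannot parse invalid wire-format data") instead of case-specific messages such as "invalid field number". Roughly what a caller observes (the error text is typically wrapped with a "proto:" prefix):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/emptypb"
    )

    func main() {
        bad := []byte{0xff} // a tag varint with no terminating byte
        err := proto.Unmarshal(bad, &emptypb.Empty{})
        fmt.Println(err) // e.g. "proto: cannot parse invalid wire-format data"
    }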
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 8c8a794..845c67d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -79,8 +79,9 @@
size += f.funcs.size(fptr, f, opts)
}
if mi.unknownOffset.IsValid() {
- u := *p.Apply(mi.unknownOffset).Bytes()
- size += len(u)
+ if u := mi.getUnknownBytes(p); u != nil {
+ size += len(*u)
+ }
}
if mi.sizecacheOffset.IsValid() {
if size > math.MaxInt32 {
@@ -141,8 +142,9 @@
}
}
if mi.unknownOffset.IsValid() && !mi.isMessageSet {
- u := *p.Apply(mi.unknownOffset).Bytes()
- b = append(b, u...)
+ if u := mi.getUnknownBytes(p); u != nil {
+ b = append(b, (*u)...)
+ }
}
return b, nil
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
index 94f4572..e3fb0b5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
@@ -7,14 +7,12 @@
import (
"encoding/binary"
"encoding/json"
- "fmt"
"hash/crc32"
"math"
"reflect"
"google.golang.org/protobuf/internal/errors"
pref "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
piface "google.golang.org/protobuf/runtime/protoiface"
)
@@ -32,7 +30,7 @@
if mv := (Export{}).protoMessageV2Of(m); mv != nil {
return mv.ProtoReflect().Type()
}
- return legacyLoadMessageInfo(reflect.TypeOf(m), name)
+ return legacyLoadMessageType(reflect.TypeOf(m), name)
}
// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input.
@@ -92,13 +90,3 @@
out = append(out, gzipFooter[:]...)
return out
}
-
-// WeakNil returns a typed nil pointer to a concrete message.
-// It panics if the message is not linked into the binary.
-func (Export) WeakNil(s pref.FullName) piface.MessageV1 {
- mt, err := protoregistry.GlobalTypes.FindMessageByName(s)
- if err != nil {
- panic(fmt.Sprintf("weak message %v is not linked in", s))
- }
- return mt.Zero().Interface().(piface.MessageV1)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 93c318f..49e7231 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -154,7 +154,10 @@
func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 }
func (x placeholderExtension) Kind() pref.Kind { return 0 }
func (x placeholderExtension) HasJSONName() bool { return false }
-func (x placeholderExtension) JSONName() string { return "" }
+func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) HasPresence() bool { return false }
+func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 06c68e1..029feee 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -24,14 +24,24 @@
// legacyWrapMessage wraps v as a protoreflect.Message,
// where v must be a *struct kind and not implement the v2 API already.
func legacyWrapMessage(v reflect.Value) pref.Message {
- typ := v.Type()
- if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
+ t := v.Type()
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
return aberrantMessage{v: v}
}
- mt := legacyLoadMessageInfo(typ, "")
+ mt := legacyLoadMessageInfo(t, "")
return mt.MessageOf(v.Interface())
}
+// legacyLoadMessageType dynamically loads a protoreflect.Type for t,
+// where t must not implement the v2 API already.
+// The provided name is used if it cannot be determined from the message.
+func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType {
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return aberrantMessageType{t}
+ }
+ return legacyLoadMessageInfo(t, name)
+}
+
var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo
// legacyLoadMessageInfo dynamically loads a *MessageInfo for t,
@@ -49,8 +59,9 @@
GoReflectType: t,
}
+ var hasMarshal, hasUnmarshal bool
v := reflect.Zero(t).Interface()
- if _, ok := v.(legacyMarshaler); ok {
+ if _, hasMarshal = v.(legacyMarshaler); hasMarshal {
mi.methods.Marshal = legacyMarshal
// We have no way to tell whether the type's Marshal method
@@ -59,10 +70,10 @@
// calling Marshal methods when present.
mi.methods.Flags |= piface.SupportMarshalDeterministic
}
- if _, ok := v.(legacyUnmarshaler); ok {
+ if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal {
mi.methods.Unmarshal = legacyUnmarshal
}
- if _, ok := v.(legacyMerger); ok {
+ if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) {
mi.methods.Merge = legacyMerge
}
@@ -75,7 +86,7 @@
var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor
// LegacyLoadMessageDesc returns a MessageDescriptor derived from the Go type,
-// which must be a *struct kind and not implement the v2 API already.
+// which should be a *struct kind and must not implement the v2 API already.
//
// This is exported for testing purposes.
func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor {
@@ -114,17 +125,19 @@
// If the Go type has no fields, then this might be a proto3 empty message
// from before the size cache was added. If there are any fields, check to
// see that at least one of them looks like something we generated.
- if nfield := t.Elem().NumField(); nfield > 0 {
- hasProtoField := false
- for i := 0; i < nfield; i++ {
- f := t.Elem().Field(i)
- if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
- hasProtoField = true
- break
+ if t.Elem().Kind() == reflect.Struct {
+ if nfield := t.Elem().NumField(); nfield > 0 {
+ hasProtoField := false
+ for i := 0; i < nfield; i++ {
+ f := t.Elem().Field(i)
+ if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
+ hasProtoField = true
+ break
+ }
}
- }
- if !hasProtoField {
- return aberrantLoadMessageDesc(t, name)
+ if !hasProtoField {
+ return aberrantLoadMessageDesc(t, name)
+ }
}
}
@@ -370,7 +383,7 @@
Merge(protoiface.MessageV1)
}
-var legacyProtoMethods = &piface.Methods{
+var aberrantProtoMethods = &piface.Methods{
Marshal: legacyMarshal,
Unmarshal: legacyUnmarshal,
Merge: legacyMerge,
@@ -401,18 +414,47 @@
v := in.Message.(unwrapper).protoUnwrap()
unmarshaler, ok := v.(legacyUnmarshaler)
if !ok {
- return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v)
+ return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
}
return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
}
func legacyMerge(in piface.MergeInput) piface.MergeOutput {
+ // Check whether this supports the legacy merger.
dstv := in.Destination.(unwrapper).protoUnwrap()
merger, ok := dstv.(legacyMerger)
+ if ok {
+ merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+ }
+
+ // If legacy merger is unavailable, implement merge in terms of
+ // a marshal and unmarshal operation.
+ srcv := in.Source.(unwrapper).protoUnwrap()
+ marshaler, ok := srcv.(legacyMarshaler)
if !ok {
return piface.MergeOutput{}
}
- merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+ dstv = in.Destination.(unwrapper).protoUnwrap()
+ unmarshaler, ok := dstv.(legacyUnmarshaler)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ if !in.Source.IsValid() {
+ // Legacy Marshal methods may not function on nil messages.
+ // Check for a typed nil source only after we confirm that
+ // legacy Marshal/Unmarshal methods are present, for
+ // consistency.
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+ }
+ b, err := marshaler.Marshal()
+ if err != nil {
+ return piface.MergeOutput{}
+ }
+ err = unmarshaler.Unmarshal(b)
+ if err != nil {
+ return piface.MergeOutput{}
+ }
return piface.MergeOutput{Flags: piface.MergeComplete}
}
@@ -422,6 +464,9 @@
}
func (mt aberrantMessageType) New() pref.Message {
+ if mt.t.Kind() == reflect.Ptr {
+ return aberrantMessage{reflect.New(mt.t.Elem())}
+ }
return aberrantMessage{reflect.Zero(mt.t)}
}
func (mt aberrantMessageType) Zero() pref.Message {
@@ -443,6 +488,17 @@
v reflect.Value
}
+// Reset implements the v1 proto.Message.Reset method.
+func (m aberrantMessage) Reset() {
+ if mr, ok := m.v.Interface().(interface{ Reset() }); ok {
+ mr.Reset()
+ return
+ }
+ if m.v.Kind() == reflect.Ptr && !m.v.IsNil() {
+ m.v.Elem().Set(reflect.Zero(m.v.Type().Elem()))
+ }
+}
+
func (m aberrantMessage) ProtoReflect() pref.Message {
return m
}
@@ -454,33 +510,40 @@
return aberrantMessageType{m.v.Type()}
}
func (m aberrantMessage) New() pref.Message {
+ if m.v.Type().Kind() == reflect.Ptr {
+ return aberrantMessage{reflect.New(m.v.Type().Elem())}
+ }
return aberrantMessage{reflect.Zero(m.v.Type())}
}
func (m aberrantMessage) Interface() pref.ProtoMessage {
return m
}
func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ return
}
func (m aberrantMessage) Has(pref.FieldDescriptor) bool {
- panic("invalid field descriptor")
+ return false
}
func (m aberrantMessage) Clear(pref.FieldDescriptor) {
- panic("invalid field descriptor")
+ panic("invalid Message.Clear on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value {
- panic("invalid field descriptor")
+func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value {
+ if fd.Default().IsValid() {
+ return fd.Default()
+ }
+ panic("invalid Message.Get on " + string(m.Descriptor().FullName()))
}
func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) {
- panic("invalid field descriptor")
+ panic("invalid Message.Set on " + string(m.Descriptor().FullName()))
}
func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value {
- panic("invalid field descriptor")
+ panic("invalid Message.Mutable on " + string(m.Descriptor().FullName()))
}
func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value {
- panic("invalid field descriptor")
+ panic("invalid Message.NewField on " + string(m.Descriptor().FullName()))
}
func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor {
- panic("invalid oneof descriptor")
+ panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName()))
}
func (m aberrantMessage) GetUnknown() pref.RawFields {
return nil
@@ -489,13 +552,13 @@
// SetUnknown discards its input on messages which don't support unknown field storage.
}
func (m aberrantMessage) IsValid() bool {
- // An invalid message is a read-only, empty message. Since we don't know anything
- // about the alleged contents of this message, we can't say with confidence that
- // it is invalid in this sense. Therefore, report it as valid.
- return true
+ if m.v.Kind() == reflect.Ptr {
+ return !m.v.IsNil()
+ }
+ return false
}
func (m aberrantMessage) ProtoMethods() *piface.Methods {
- return legacyProtoMethods
+ return aberrantProtoMethods
}
func (m aberrantMessage) protoUnwrap() interface{} {
return m.v.Interface()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index cdc4267..c65bbc0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -77,9 +77,9 @@
}
}
if mi.unknownOffset.IsValid() {
- du := dst.Apply(mi.unknownOffset).Bytes()
- su := src.Apply(mi.unknownOffset).Bytes()
- if len(*su) > 0 {
+ su := mi.getUnknownBytes(src)
+ if su != nil && len(*su) > 0 {
+ du := mi.mutableUnknownBytes(dst)
*du = append(*du, *su...)
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index c1d8902..a104e28 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -12,10 +12,10 @@
"sync"
"sync/atomic"
- "google.golang.org/protobuf/internal/genname"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -109,23 +109,30 @@
type (
SizeCache = int32
- WeakFields = map[int32]piface.MessageV1
- UnknownFields = []byte
+ WeakFields = map[int32]protoreflect.ProtoMessage
+ UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB
+ unknownFieldsA = []byte
+ unknownFieldsB = *[]byte
ExtensionFields = map[int32]ExtensionField
)
var (
sizecacheType = reflect.TypeOf(SizeCache(0))
weakFieldsType = reflect.TypeOf(WeakFields(nil))
- unknownFieldsType = reflect.TypeOf(UnknownFields(nil))
+ unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
+ unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
)
type structInfo struct {
sizecacheOffset offset
+ sizecacheType reflect.Type
weakOffset offset
+ weakType reflect.Type
unknownOffset offset
+ unknownType reflect.Type
extensionOffset offset
+ extensionType reflect.Type
fieldsByNumber map[pref.FieldNumber]reflect.StructField
oneofsByName map[pref.Name]reflect.StructField
@@ -149,21 +156,25 @@
fieldLoop:
for i := 0; i < t.NumField(); i++ {
switch f := t.Field(i); f.Name {
- case genname.SizeCache, genname.SizeCacheA:
+ case genid.SizeCache_goname, genid.SizeCacheA_goname:
if f.Type == sizecacheType {
si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ si.sizecacheType = f.Type
}
- case genname.WeakFields, genname.WeakFieldsA:
+ case genid.WeakFields_goname, genid.WeakFieldsA_goname:
if f.Type == weakFieldsType {
si.weakOffset = offsetOf(f, mi.Exporter)
+ si.weakType = f.Type
}
- case genname.UnknownFields, genname.UnknownFieldsA:
- if f.Type == unknownFieldsType {
+ case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
+ if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
si.unknownOffset = offsetOf(f, mi.Exporter)
+ si.unknownType = f.Type
}
- case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB:
+ case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
if f.Type == extensionFieldsType {
si.extensionOffset = offsetOf(f, mi.Exporter)
+ si.extensionType = f.Type
}
default:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
@@ -213,4 +224,53 @@
func (mi *MessageInfo) Zero() protoreflect.Message {
return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface())
}
-func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc }
+func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor {
+ return mi.Desc
+}
+func (mi *MessageInfo) Enum(i int) protoreflect.EnumType {
+ mi.init()
+ fd := mi.Desc.Fields().Get(i)
+ return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()])
+}
+func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
+ mi.init()
+ fd := mi.Desc.Fields().Get(i)
+ switch {
+ case fd.IsWeak():
+ mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName())
+ return mt
+ case fd.IsMap():
+ return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
+ default:
+ return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()])
+ }
+}
+
+type mapEntryType struct {
+ desc protoreflect.MessageDescriptor
+ valType interface{} // zero value of enum or message type
+}
+
+func (mt mapEntryType) New() protoreflect.Message {
+ return nil
+}
+func (mt mapEntryType) Zero() protoreflect.Message {
+ return nil
+}
+func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor {
+ return mt.desc
+}
+func (mt mapEntryType) Enum(i int) protoreflect.EnumType {
+ fd := mt.desc.Fields().Get(i)
+ if fd.Enum() == nil {
+ return nil
+ }
+ return Export{}.EnumTypeOf(mt.valType)
+}
+func (mt mapEntryType) Message(i int) protoreflect.MessageType {
+ fd := mt.desc.Fields().Get(i)
+ if fd.Message() == nil {
+ return nil
+ }
+ return Export{}.MessageTypeOf(mt.valType)
+}
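
makeStructInfo, together with the new sizecacheType/unknownType/extensionType bookkeeping, matches the auxiliary fields of a generated struct by Go name and type and matches ordinary fields through their "protobuf" struct tags; unknown fields may now be declared as either []byte or *[]byte. A sketch of the struct shape being recognized, using field names that follow current generator output (illustrative only, not generated code):

    package main

    // exampleMessage sketches the layout makeStructInfo looks for. The
    // XXX_-prefixed spellings emitted by older generators are also accepted.
    type exampleMessage struct {
        sizeCache     int32  // matched as SizeCache
        unknownFields []byte // matched as unknownFieldsA; *[]byte (unknownFieldsB) also works

        Name  string `protobuf:"bytes,1,opt,name=name,proto3"`
        Count int32  `protobuf:"varint,2,opt,name=count,proto3"`
    }

    func main() { _ = exampleMessage{} }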
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index aac55ee..9488b72 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -8,6 +8,7 @@
"fmt"
"reflect"
+ "google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/pragma"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
@@ -16,6 +17,11 @@
fields map[pref.FieldNumber]*fieldInfo
oneofs map[pref.Name]*oneofInfo
+ // fieldTypes contains the zero value of an enum or message field.
+ // For lists, it contains the element type.
+ // For maps, it contains the entry value type.
+ fieldTypes map[pref.FieldNumber]interface{}
+
// denseFields is a subset of fields where:
// 0 < fieldDesc.Number() < len(denseFields)
// It provides faster access to the fieldInfo, but may be incomplete.
@@ -36,6 +42,7 @@
mi.makeKnownFieldsFunc(si)
mi.makeUnknownFieldsFunc(t, si)
mi.makeExtensionFieldsFunc(t, si)
+ mi.makeFieldTypes(si)
}
// makeKnownFieldsFunc generates functions for operations that can be performed
@@ -51,17 +58,23 @@
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
var fi fieldInfo
switch {
- case fd.ContainingOneof() != nil:
- fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+ case fs.Type == nil:
+ fi = fieldInfoForMissing(fd) // never occurs for officially generated message types
+ case isOneof:
+ fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
case fd.IsMap():
fi = fieldInfoForMap(fd, fs, mi.Exporter)
case fd.IsList():
fi = fieldInfoForList(fd, fs, mi.Exporter)
case fd.IsWeak():
fi = fieldInfoForWeakMessage(fd, si.weakOffset)
- case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind:
+ case fd.Message() != nil:
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
fi = fieldInfoForScalar(fd, fs, mi.Exporter)
@@ -72,7 +85,7 @@
mi.oneofs = map[pref.Name]*oneofInfo{}
for i := 0; i < md.Oneofs().Len(); i++ {
od := md.Oneofs().Get(i)
- mi.oneofs[od.Name()] = makeOneofInfo(od, si.oneofsByName[od.Name()], mi.Exporter, si.oneofWrappersByType)
+ mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
}
mi.denseFields = make([]*fieldInfo, fds.Len()*2)
@@ -84,7 +97,7 @@
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
- if od := fd.ContainingOneof(); od != nil {
+ if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() {
mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
i += od.Fields().Len()
} else {
@@ -92,27 +105,53 @@
i++
}
}
+
+ // Introduce instability to iteration order, but keep it deterministic.
+ if len(mi.rangeInfos) > 1 && detrand.Bool() {
+ i := detrand.Intn(len(mi.rangeInfos) - 1)
+ mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i]
+ }
}
func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
- mi.getUnknown = func(pointer) pref.RawFields { return nil }
- mi.setUnknown = func(pointer, pref.RawFields) { return }
- if si.unknownOffset.IsValid() {
+ switch {
+ case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType:
+ // Handle as []byte.
mi.getUnknown = func(p pointer) pref.RawFields {
if p.IsNil() {
return nil
}
- rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
- return pref.RawFields(*rv.Interface().(*[]byte))
+ return *p.Apply(mi.unknownOffset).Bytes()
}
mi.setUnknown = func(p pointer, b pref.RawFields) {
if p.IsNil() {
panic("invalid SetUnknown on nil Message")
}
- rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
- *rv.Interface().(*[]byte) = []byte(b)
+ *p.Apply(mi.unknownOffset).Bytes() = b
}
- } else {
+ case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType:
+ // Handle as *[]byte.
+ mi.getUnknown = func(p pointer) pref.RawFields {
+ if p.IsNil() {
+ return nil
+ }
+ bp := p.Apply(mi.unknownOffset).BytesPtr()
+ if *bp == nil {
+ return nil
+ }
+ return **bp
+ }
+ mi.setUnknown = func(p pointer, b pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ bp := p.Apply(mi.unknownOffset).BytesPtr()
+ if *bp == nil {
+ *bp = new([]byte)
+ }
+ **bp = b
+ }
+ default:
mi.getUnknown = func(pointer) pref.RawFields {
return nil
}
@@ -139,6 +178,58 @@
}
}
}
+func (mi *MessageInfo) makeFieldTypes(si structInfo) {
+ md := mi.Desc
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ var ft reflect.Type
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ var isMessage bool
+ switch {
+ case fs.Type == nil:
+ continue // never occurs for officially generated message types
+ case isOneof:
+ if fd.Enum() != nil || fd.Message() != nil {
+ ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type
+ }
+ case fd.IsMap():
+ if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil {
+ ft = fs.Type.Elem()
+ }
+ isMessage = fd.MapValue().Message() != nil
+ case fd.IsList():
+ if fd.Enum() != nil || fd.Message() != nil {
+ ft = fs.Type.Elem()
+ }
+ isMessage = fd.Message() != nil
+ case fd.Enum() != nil:
+ ft = fs.Type
+ if fd.HasPresence() && ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ case fd.Message() != nil:
+ ft = fs.Type
+ if fd.IsWeak() {
+ ft = nil
+ }
+ isMessage = true
+ }
+ if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
+ ft = reflect.PtrTo(ft) // never occurs for officially generated message types
+ }
+ if ft != nil {
+ if mi.fieldTypes == nil {
+ mi.fieldTypes = make(map[pref.FieldNumber]interface{})
+ }
+ mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
+ }
+ }
+}
type extensionMap map[int32]ExtensionField
@@ -170,6 +261,8 @@
return x.Value().List().Len() > 0
case xd.IsMap():
return x.Value().Map().Len() > 0
+ case xd.Message() != nil:
+ return x.Value().Message().IsValid()
}
return true
}
@@ -186,15 +279,28 @@
return xt.Zero()
}
func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) {
- if !xt.IsValidValue(v) {
+ xd := xt.TypeDescriptor()
+ isValid := true
+ switch {
+ case !xt.IsValidValue(v):
+ isValid = false
+ case xd.IsList():
+ isValid = v.List().IsValid()
+ case xd.IsMap():
+ isValid = v.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = v.Message().IsValid()
+ }
+ if !isValid {
panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
}
+
if *m == nil {
*m = make(map[int32]ExtensionField)
}
var x ExtensionField
x.Set(xt, v)
- (*m)[int32(xt.TypeDescriptor().Number())] = x
+ (*m)[int32(xd.Number())] = x
}
func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
xd := xt.TypeDescriptor()
@@ -291,7 +397,6 @@
// pointer to a named Go struct. If the provided type has a ProtoReflect method,
// it must be implemented by calling this method.
func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
- // TODO: Switch the input to be an opaque Pointer.
if reflect.TypeOf(m) != mi.GoReflectType {
panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
}
@@ -305,6 +410,17 @@
func (m *messageReflectWrapper) pointer() pointer { return m.p }
func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi }
+// Reset implements the v1 proto.Message.Reset method.
+func (m *messageIfaceWrapper) Reset() {
+ if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok {
+ mr.Reset()
+ return
+ }
+ rv := reflect.ValueOf(m.protoUnwrap())
+ if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+ rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+ }
+}
func (m *messageIfaceWrapper) ProtoReflect() pref.Message {
return (*messageReflectWrapper)(m)
}
@@ -323,24 +439,27 @@
}
if fi != nil {
if fi.fieldDesc != fd {
- panic("mismatching field descriptor")
+ if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want {
+ panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want))
+ }
+ panic(fmt.Sprintf("mismatching field: %v", fd.FullName()))
}
return fi, nil
}
if fd.IsExtension() {
- if fd.ContainingMessage().FullName() != mi.Desc.FullName() {
+ if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want {
// TODO: Should this be exact containing message descriptor match?
- panic("mismatching containing message")
+ panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want))
}
if !mi.Desc.ExtensionRanges().Has(fd.Number()) {
- panic("invalid extension field")
+ panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName()))
}
xtd, ok := fd.(pref.ExtensionTypeDescriptor)
if !ok {
- panic("extension descriptor does not implement ExtensionTypeDescriptor")
+ panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
}
return nil, xtd.Type()
}
- panic("invalid field descriptor")
+ panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
}
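
The detrand-based swap above deliberately perturbs the order of rangeInfos, while staying deterministic within a given binary, so callers cannot grow to depend on declaration order. Externally this shows up in protoreflect.Message.Range, whose visiting order is unspecified:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        ts := &timestamppb.Timestamp{Seconds: 1, Nanos: 2}
        ts.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
            fmt.Println(fd.Number(), fd.Name(), v) // order is unspecified
            return true
        })
    }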
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 7b87d47..343cf87 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -28,16 +28,49 @@
newField func() pref.Value
}
+func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo {
+ // This never occurs for generated message types.
+ // It implies that a hand-crafted type has missing Go fields
+ // for specific protobuf message fields.
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ return false
+ },
+ clear: func(p pointer) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ get: func(p pointer) pref.Value {
+ return fd.Default()
+ },
+ set: func(p pointer, v pref.Value) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ mutable: func(p pointer) pref.Value {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ newMessage: func() pref.Message {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ newField: func() pref.Value {
+ if v := fd.Default(); v.IsValid() {
+ return v
+ }
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ }
+}
+
func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Interface {
- panic(fmt.Sprintf("invalid type: got %v, want interface kind", ft))
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft))
}
if ot.Kind() != reflect.Struct {
- panic(fmt.Sprintf("invalid type: got %v, want struct kind", ot))
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot))
}
if !reflect.PtrTo(ot).Implements(ft) {
- panic(fmt.Sprintf("invalid type: %v does not implement %v", ot, ft))
+ panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft))
}
conv := NewConverter(ot.Field(0).Type, fd)
isMessage := fd.Message() != nil
@@ -90,14 +123,14 @@
},
mutable: func(p pointer) pref.Value {
if !isMessage {
- panic("invalid Mutable on field with non-composite type")
+ panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName()))
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
rv.Set(reflect.New(ot))
}
rv = rv.Elem().Elem().Field(0)
- if rv.IsNil() {
+ if rv.Kind() == reflect.Ptr && rv.IsNil() {
rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message())))
}
return conv.PBValueOf(rv)
@@ -114,7 +147,7 @@
func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Map {
- panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft))
}
conv := NewConverter(ft, fd)
@@ -147,7 +180,7 @@
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
pv := conv.GoValueOf(v)
if pv.IsNil() {
- panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+ panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName()))
}
rv.Set(pv)
},
@@ -167,7 +200,7 @@
func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Slice {
- panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft))
}
conv := NewConverter(reflect.PtrTo(ft), fd)
@@ -200,7 +233,7 @@
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
pv := conv.GoValueOf(v)
if pv.IsNil() {
- panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName()))
}
rv.Set(pv.Elem())
},
@@ -221,11 +254,14 @@
func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
- nullable := fd.Syntax() == pref.Proto2
+ nullable := fd.HasPresence()
isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
if nullable {
if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
- panic(fmt.Sprintf("invalid type: got %v, want pointer", ft))
+ // This never occurs for generated message types.
+ // Despite the protobuf type system specifying presence,
+ // the Go field type cannot represent it.
+ nullable = false
}
if ft.Kind() == reflect.Ptr {
ft = ft.Elem()
@@ -257,7 +293,7 @@
case reflect.String, reflect.Slice:
return rv.Len() > 0
default:
- panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+ panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen
}
},
clear: func(p pointer) {
@@ -290,9 +326,9 @@
rv.Set(conv.GoValueOf(v))
if isBytes && rv.Len() == 0 {
if nullable {
- rv.Set(emptyBytes) // preserve presence in proto2
+ rv.Set(emptyBytes) // preserve presence
} else {
- rv.Set(nilBytes) // do not preserve presence in proto3
+ rv.Set(nilBytes) // do not preserve presence
}
}
},
@@ -314,7 +350,7 @@
messageName := fd.Message().FullName()
messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", messageName))
+ panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
}
})
}
@@ -347,7 +383,10 @@
lazyInit()
m := v.Message()
if m.Descriptor() != messageType.Descriptor() {
- panic("mismatching message descriptor")
+ if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
+ }
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
}
p.Apply(weakOffset).WeakFields().set(num, m.Interface())
},
@@ -385,6 +424,9 @@
return false
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if fs.Type.Kind() != reflect.Ptr {
+ return !isZero(rv)
+ }
return !rv.IsNil()
},
clear: func(p pointer) {
@@ -401,13 +443,13 @@
set: func(p pointer, v pref.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(conv.GoValueOf(v))
- if rv.IsNil() {
- panic("invalid nil pointer")
+ if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
+ panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName()))
}
},
mutable: func(p pointer) pref.Value {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
- if rv.IsNil() {
+ if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
rv.Set(conv.GoValueOf(conv.New()))
}
return conv.PBValueOf(rv)
@@ -426,11 +468,25 @@
which func(pointer) pref.FieldNumber
}
-func makeOneofInfo(od pref.OneofDescriptor, fs reflect.StructField, x exporter, wrappersByType map[reflect.Type]pref.FieldNumber) *oneofInfo {
- fieldOffset := offsetOf(fs, x)
- return &oneofInfo{
- oneofDesc: od,
- which: func(p pointer) pref.FieldNumber {
+func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() { // valid on either *T or []byte
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ } else {
+ fs := si.oneofsByName[od.Name()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
if p.IsNil() {
return 0
}
@@ -442,7 +498,46 @@
if rv.IsNil() {
return 0
}
- return wrappersByType[rv.Type().Elem()]
- },
+ return si.oneofWrappersByType[rv.Type().Elem()]
+ }
+ }
+ return oi
+}
+
+// isZero is identical to reflect.Value.IsZero.
+// TODO: Remove this when Go1.13 is the minimally supported Go version.
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return math.Float64bits(v.Float()) == 0
+ case reflect.Complex64, reflect.Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ return v.IsNil()
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ default:
+ panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()})
}
}
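
fieldInfoForScalar now keys nullability off fd.HasPresence() instead of the syntax version, so proto2 optional fields and proto3 `optional` fields (synthetic oneofs) get the same pointer-backed presence handling. The user-visible semantics, shown with a proto2 message from descriptorpb:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        // An explicit-presence field backed by a pointer: Has distinguishes
        // "unset" from "set to the zero value", and Clear resets it to unset.
        fdp := &descriptorpb.FieldDescriptorProto{Name: proto.String("")}
        fd := fdp.ProtoReflect().Descriptor().Fields().ByName("name")
        fmt.Println(fdp.ProtoReflect().Has(fd)) // true: present, even though empty
        fdp.ProtoReflect().Clear(fd)
        fmt.Println(fdp.ProtoReflect().Has(fd)) // false
    }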
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
index 7d65f5c..741d6e5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -114,7 +114,7 @@
if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
return od.Fields().ByNumber(oi.which(m.pointer()))
}
- panic("invalid oneof descriptor")
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
}
func (m *messageState) GetUnknown() protoreflect.RawFields {
m.messageInfo().init()
@@ -234,7 +234,7 @@
if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
return od.Fields().ByNumber(oi.which(m.pointer()))
}
- panic("invalid oneof descriptor")
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
}
func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
m.messageInfo().init()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
index 67b4ede..9e3ed82 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -121,6 +121,7 @@
func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
+func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
func (p pointer) Extensions() *map[int32]ExtensionField {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 2741643..9ecf23a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -109,6 +109,7 @@
func (p pointer) StringPtr() **string { return (**string)(p.p) }
func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) }
func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
+func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
@@ -148,7 +149,11 @@
return pointer{p: unsafe.Pointer(ms)}
}
func (ms *messageState) messageInfo() *MessageInfo {
- return ms.LoadMessageInfo()
+ mi := ms.LoadMessageInfo()
+ if mi == nil {
+ panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct")
+ }
+ return mi
}
func (ms *messageState) LoadMessageInfo() *MessageInfo {
return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo))))
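
The nil check added to messageState.messageInfo turns silent corruption from shallow copies of (or races on) a live message struct into an immediate panic with a diagnostic message. The supported way to duplicate a message remains proto.Clone, sketched here:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        src := wrapperspb.String("hello")
        dst := proto.Clone(src).(*wrapperspb.StringValue) // deep copy, safe
        fmt.Println(dst.GetValue())
        // Copying the struct by value (*dst = *src) is unsupported and is the
        // kind of misuse the new nil-MessageInfo panic is meant to surface.
    }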
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index 39d62fd..08cfb60 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -14,6 +14,7 @@
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
pref "google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
@@ -108,7 +109,7 @@
func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
var vi validationInfo
switch {
- case fd.ContainingOneof() != nil:
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
switch fd.Kind() {
case pref.MessageKind:
vi.typ = validationTypeMessage
@@ -282,9 +283,9 @@
switch {
case st.typ == validationTypeMap:
switch num {
- case 1:
+ case genid.MapEntry_Key_field_number:
vi.typ = st.keyType
- case 2:
+ case genid.MapEntry_Value_field_number:
vi.typ = st.valType
vi.mi = st.mi
vi.requiredBit = 1
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
index 575c988..009cbef 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go
@@ -5,9 +5,10 @@
package impl
import (
- "reflect"
+ "fmt"
pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
)
// weakFields adds methods to the exported WeakFields type for internal use.
@@ -16,31 +17,58 @@
// defined directly on it.
type weakFields WeakFields
-func (w *weakFields) get(num pref.FieldNumber) (_ pref.ProtoMessage, ok bool) {
- if *w == nil {
- return nil, false
- }
- m, ok := (*w)[int32(num)]
- if !ok {
- return nil, false
- }
- // As a legacy quirk, consider a typed nil to be unset.
- //
- // TODO: Consider fixing the generated set methods to clear the field
- // when provided with a typed nil.
- if v := reflect.ValueOf(m); v.Kind() == reflect.Ptr && v.IsNil() {
- return nil, false
- }
- return Export{}.ProtoMessageV2Of(m), true
+func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) {
+ m, ok := w[int32(num)]
+ return m, ok
}
func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) {
if *w == nil {
*w = make(weakFields)
}
- (*w)[int32(num)] = Export{}.ProtoMessageV1Of(m)
+ (*w)[int32(num)] = m
}
func (w *weakFields) clear(num pref.FieldNumber) {
delete(*w, int32(num))
}
+
+func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool {
+ _, ok := w[int32(num)]
+ return ok
+}
+
+func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage {
+ if m, ok := w[int32(num)]; ok {
+ return m
+ }
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ return mt.Zero().Interface()
+}
+
+func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) {
+ if m != nil {
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ if mt != m.ProtoReflect().Type() {
+ panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
+ }
+ }
+ if m == nil || !m.ProtoReflect().IsValid() {
+ delete(*w, int32(num))
+ return
+ }
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
deleted file mode 100644
index a3de1cf..0000000
--- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package mapsort provides sorted access to maps.
-package mapsort
-
-import (
- "sort"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// Range iterates over every map entry in sorted key order,
-// calling f for each key and value encountered.
-func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
- var keys []protoreflect.MapKey
- mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool {
- keys = append(keys, key)
- return true
- })
- sort.Slice(keys, func(i, j int) bool {
- switch keyKind {
- case protoreflect.BoolKind:
- return !keys[i].Bool() && keys[j].Bool()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind,
- protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return keys[i].Int() < keys[j].Int()
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind,
- protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return keys[i].Uint() < keys[j].Uint()
- case protoreflect.StringKind:
- return keys[i].String() < keys[j].String()
- default:
- panic("invalid kind: " + keyKind.String())
- }
- })
- for _, key := range keys {
- if !f(key, mapv.Get(key)) {
- break
- }
- }
-}
diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go
new file mode 100644
index 0000000..2a24953
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/order.go
@@ -0,0 +1,89 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package order
+
+import (
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// FieldOrder specifies the ordering to visit message fields.
+// It is a function that reports whether x is ordered before y.
+type FieldOrder func(x, y pref.FieldDescriptor) bool
+
+var (
+ // AnyFieldOrder specifies no specific field ordering.
+ AnyFieldOrder FieldOrder = nil
+
+ // LegacyFieldOrder sorts fields in the same ordering as emitted by
+ // wire serialization in the github.com/golang/protobuf implementation.
+ LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ ox, oy := x.ContainingOneof(), y.ContainingOneof()
+ inOneof := func(od pref.OneofDescriptor) bool {
+ return od != nil && !od.IsSynthetic()
+ }
+
+ // Extension fields sort before non-extension fields.
+ if x.IsExtension() != y.IsExtension() {
+ return x.IsExtension() && !y.IsExtension()
+ }
+ // Fields not within a oneof sort before those within a oneof.
+ if inOneof(ox) != inOneof(oy) {
+ return !inOneof(ox) && inOneof(oy)
+ }
+ // Fields in disjoint oneof sets are sorted by declaration index.
+ if ox != nil && oy != nil && ox != oy {
+ return ox.Index() < oy.Index()
+ }
+ // Fields sorted by field number.
+ return x.Number() < y.Number()
+ }
+
+ // NumberFieldOrder sorts fields by their field number.
+ NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ return x.Number() < y.Number()
+ }
+
+ // IndexNameFieldOrder sorts non-extension fields before extension fields.
+ // Non-extensions are sorted according to their declaration index.
+ // Extensions are sorted according to their full name.
+ IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ // Non-extension fields sort before extension fields.
+ if x.IsExtension() != y.IsExtension() {
+ return !x.IsExtension() && y.IsExtension()
+ }
+ // Extensions sorted by fullname.
+ if x.IsExtension() && y.IsExtension() {
+ return x.FullName() < y.FullName()
+ }
+ // Non-extensions sorted by declaration index.
+ return x.Index() < y.Index()
+ }
+)
+
+// KeyOrder specifies the ordering to visit map entries.
+// It is a function that reports whether x is ordered before y.
+type KeyOrder func(x, y pref.MapKey) bool
+
+var (
+ // AnyKeyOrder specifies no specific key ordering.
+ AnyKeyOrder KeyOrder = nil
+
+ // GenericKeyOrder sorts false before true, numeric keys in ascending order,
+ // and strings in lexicographical ordering according to UTF-8 codepoints.
+ GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool {
+ switch x.Interface().(type) {
+ case bool:
+ return !x.Bool() && y.Bool()
+ case int32, int64:
+ return x.Int() < y.Int()
+ case uint32, uint64:
+ return x.Uint() < y.Uint()
+ case string:
+ return x.String() < y.String()
+ default:
+ panic("invalid map key type")
+ }
+ }
+)
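
The comparators above are internal, but their contract is observable through the public protoreflect API. A small sketch of the GenericKeyOrder rule for string map keys (structpb is assumed to be available in the module; it is used here only because it has a map field):

package main

import (
	"fmt"
	"sort"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, _ := structpb.NewStruct(map[string]interface{}{"b": 2.0, "a": 1.0, "c": 3.0})
	m := s.ProtoReflect()
	fd := m.Descriptor().Fields().ByName("fields") // map<string, google.protobuf.Value>

	// Collect and sort the keys the same way GenericKeyOrder does for strings.
	var keys []protoreflect.MapKey
	m.Get(fd).Map().Range(func(k protoreflect.MapKey, _ protoreflect.Value) bool {
		keys = append(keys, k)
		return true
	})
	sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() })
	for _, k := range keys {
		fmt.Println(k.String()) // a, b, c
	}
}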
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
new file mode 100644
index 0000000..c8090e0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -0,0 +1,115 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package order provides ordered access to messages and maps.
+package order
+
+import (
+ "sort"
+ "sync"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type messageField struct {
+ fd pref.FieldDescriptor
+ v pref.Value
+}
+
+var messageFieldPool = sync.Pool{
+ New: func() interface{} { return new([]messageField) },
+}
+
+type (
+ // FieldRanger is an interface for visiting all fields in a message.
+ // The protoreflect.Message type implements this interface.
+ FieldRanger interface{ Range(VisitField) }
+ // VisitField is called everytime a message field is visited.
+ VisitField = func(pref.FieldDescriptor, pref.Value) bool
+)
+
+// RangeFields iterates over the fields of fs according to the specified order.
+func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
+ if less == nil {
+ fs.Range(fn)
+ return
+ }
+
+ // Obtain a pre-allocated scratch buffer.
+ p := messageFieldPool.Get().(*[]messageField)
+ fields := (*p)[:0]
+ defer func() {
+ if cap(fields) < 1024 {
+ *p = fields
+ messageFieldPool.Put(p)
+ }
+ }()
+
+ // Collect all fields in the message and sort them.
+ fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+ fields = append(fields, messageField{fd, v})
+ return true
+ })
+ sort.Slice(fields, func(i, j int) bool {
+ return less(fields[i].fd, fields[j].fd)
+ })
+
+ // Visit the fields in the specified ordering.
+ for _, f := range fields {
+ if !fn(f.fd, f.v) {
+ return
+ }
+ }
+}
+
+type mapEntry struct {
+ k pref.MapKey
+ v pref.Value
+}
+
+var mapEntryPool = sync.Pool{
+ New: func() interface{} { return new([]mapEntry) },
+}
+
+type (
+ // EntryRanger is an interface for visiting all entries in a map.
+ // The protoreflect.Map type implements this interface.
+ EntryRanger interface{ Range(VisitEntry) }
+ // VisitEntry is called every time a map entry is visited.
+ VisitEntry = func(pref.MapKey, pref.Value) bool
+)
+
+// RangeEntries iterates over the entries of es according to the specified order.
+func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) {
+ if less == nil {
+ es.Range(fn)
+ return
+ }
+
+ // Obtain a pre-allocated scratch buffer.
+ p := mapEntryPool.Get().(*[]mapEntry)
+ entries := (*p)[:0]
+ defer func() {
+ if cap(entries) < 1024 {
+ *p = entries
+ mapEntryPool.Put(p)
+ }
+ }()
+
+ // Collect all entries in the map and sort them.
+ es.Range(func(k pref.MapKey, v pref.Value) bool {
+ entries = append(entries, mapEntry{k, v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ return less(entries[i].k, entries[j].k)
+ })
+
+ // Visit the entries in the specified ordering.
+ for _, e := range entries {
+ if !fn(e.k, e.v) {
+ return
+ }
+ }
+}
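
RangeFields and RangeEntries reuse pooled scratch slices so that sorted iteration does not allocate on every call. A standalone sketch of that pattern in plain Go (not protobuf-specific):

package main

import (
	"fmt"
	"sort"
	"sync"
)

var scratchPool = sync.Pool{New: func() interface{} { return new([]int) }}

// sortedVisit visits items in ascending order using a pooled scratch buffer,
// mirroring the collect/sort/visit shape of RangeFields and RangeEntries.
func sortedVisit(items []int, visit func(int) bool) {
	p := scratchPool.Get().(*[]int)
	buf := (*p)[:0]
	defer func() {
		if cap(buf) < 1024 { // avoid pinning large buffers in the pool
			*p = buf
			scratchPool.Put(p)
		}
	}()

	buf = append(buf, items...)
	sort.Ints(buf)
	for _, v := range buf {
		if !visit(v) {
			return
		}
	}
}

func main() {
	sortedVisit([]int{3, 1, 2}, func(v int) bool { fmt.Println(v); return true })
}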
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index cd84284..14e774f 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,8 +52,8 @@
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 21
- Patch = 0
+ Minor = 27
+ Patch = 1
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go
index d7c9923..3e9a6a2 100644
--- a/vendor/google.golang.org/protobuf/proto/checkinit.go
+++ b/vendor/google.golang.org/protobuf/proto/checkinit.go
@@ -12,6 +12,12 @@
// CheckInitialized returns an error if any required fields in m are not set.
func CheckInitialized(m Message) error {
+ // Treat a nil message interface as an "untyped" empty message,
+ // which we assume to have no required fields.
+ if m == nil {
+ return nil
+ }
+
return checkInitialized(m.ProtoReflect())
}
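
With the nil check above, CheckInitialized accepts a nil message interface. A short sketch (descriptorpb is used only as a readily available generated message; it declares no required fields):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fmt.Println(proto.CheckInitialized(nil))                                  // <nil>
	fmt.Println(proto.CheckInitialized(&descriptorpb.FileDescriptorProto{})) // <nil>
}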
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 1282147..49f9b8c 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -9,6 +9,7 @@
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
@@ -44,12 +45,14 @@
}
// Unmarshal parses the wire-format message in b and places the result in m.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m Message) error {
_, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
return err
}
// Unmarshal parses the wire-format message in b and places the result in m.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
_, err := o.unmarshal(b, m.ProtoReflect())
return err
@@ -63,12 +66,15 @@
return o.unmarshal(in.Buf, in.Message)
}
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) {
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
if !o.Merge {
- Reset(m.Interface()) // TODO
+ Reset(m.Interface())
}
allowPartial := o.AllowPartial
o.Merge = true
@@ -105,17 +111,17 @@
func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error {
md := m.Descriptor()
if messageset.IsMessageSet(md) {
- return unmarshalMessageSet(b, m, o)
+ return o.unmarshalMessageSet(b, m)
}
fields := md.Fields()
for len(b) > 0 {
// Parse the tag (field number and wire type).
num, wtyp, tagLen := protowire.ConsumeTag(b)
if tagLen < 0 {
- return protowire.ParseError(tagLen)
+ return errDecode
}
if num > protowire.MaxValidNumber {
- return errors.New("invalid field number")
+ return errDecode
}
// Find the field descriptor for this field number.
@@ -155,7 +161,7 @@
}
valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:])
if valLen < 0 {
- return protowire.ParseError(valLen)
+ return errDecode
}
if !o.DiscardUnknown {
m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...))
@@ -190,7 +196,7 @@
}
b, n = protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
var (
keyField = fd.MapKey()
@@ -209,21 +215,21 @@
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
if num > protowire.MaxValidNumber {
- return 0, errors.New("invalid field number")
+ return 0, errDecode
}
b = b[n:]
err = errUnknown
switch num {
- case 1:
+ case genid.MapEntry_Key_field_number:
key, n, err = o.unmarshalScalar(b, wtyp, keyField)
if err != nil {
break
}
haveKey = true
- case 2:
+ case genid.MapEntry_Value_field_number:
var v protoreflect.Value
v, n, err = o.unmarshalScalar(b, wtyp, valField)
if err != nil {
@@ -242,7 +248,7 @@
if err == errUnknown {
n = protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
} else if err != nil {
return 0, err
@@ -268,3 +274,5 @@
// to the unknown field set of a message. It is never returned from an exported
// function.
var errUnknown = errors.New("BUG: internal error (unknown)")
+
+var errDecode = errors.New("cannot parse invalid wire-format data")
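
A minimal round-trip sketch of the documented requirement that Unmarshal be given a mutable (non-nil pointer) message, and of the new single errDecode sentinel for malformed input (descriptorpb is assumed importable from this vendor tree):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	src := &descriptorpb.FileDescriptorProto{Name: proto.String("test.proto")}
	b, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	dst := &descriptorpb.FileDescriptorProto{} // non-nil pointer, i.e. mutable
	if err := proto.Unmarshal(b, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetName()) // test.proto

	// Malformed input now surfaces as a generic wire-format parse error.
	fmt.Println(proto.Unmarshal([]byte{0xFF}, dst) != nil) // true
}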
diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go
index d6dc904..301eeb2 100644
--- a/vendor/google.golang.org/protobuf/proto/decode_gen.go
+++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go
@@ -27,7 +27,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil
case protoreflect.EnumKind:
@@ -36,7 +36,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil
case protoreflect.Int32Kind:
@@ -45,7 +45,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfInt32(int32(v)), n, nil
case protoreflect.Sint32Kind:
@@ -54,7 +54,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil
case protoreflect.Uint32Kind:
@@ -63,7 +63,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfUint32(uint32(v)), n, nil
case protoreflect.Int64Kind:
@@ -72,7 +72,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfInt64(int64(v)), n, nil
case protoreflect.Sint64Kind:
@@ -81,7 +81,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil
case protoreflect.Uint64Kind:
@@ -90,7 +90,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfUint64(v), n, nil
case protoreflect.Sfixed32Kind:
@@ -99,7 +99,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfInt32(int32(v)), n, nil
case protoreflect.Fixed32Kind:
@@ -108,7 +108,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfUint32(uint32(v)), n, nil
case protoreflect.FloatKind:
@@ -117,7 +117,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil
case protoreflect.Sfixed64Kind:
@@ -126,7 +126,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfInt64(int64(v)), n, nil
case protoreflect.Fixed64Kind:
@@ -135,7 +135,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfUint64(v), n, nil
case protoreflect.DoubleKind:
@@ -144,7 +144,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil
case protoreflect.StringKind:
@@ -153,7 +153,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName()))
@@ -165,7 +165,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil
case protoreflect.MessageKind:
@@ -174,7 +174,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfBytes(v), n, nil
case protoreflect.GroupKind:
@@ -183,7 +183,7 @@
}
v, n := protowire.ConsumeGroup(fd.Number(), b)
if n < 0 {
- return val, 0, protowire.ParseError(n)
+ return val, 0, errDecode
}
return protoreflect.ValueOfBytes(v), n, nil
default:
@@ -197,12 +197,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
@@ -214,7 +214,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
return n, nil
@@ -222,12 +222,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
@@ -239,7 +239,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
return n, nil
@@ -247,12 +247,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfInt32(int32(v)))
@@ -264,7 +264,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(v)))
return n, nil
@@ -272,12 +272,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
@@ -289,7 +289,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
return n, nil
@@ -297,12 +297,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfUint32(uint32(v)))
@@ -314,7 +314,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfUint32(uint32(v)))
return n, nil
@@ -322,12 +322,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfInt64(int64(v)))
@@ -339,7 +339,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfInt64(int64(v)))
return n, nil
@@ -347,12 +347,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
@@ -364,7 +364,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
return n, nil
@@ -372,12 +372,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeVarint(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfUint64(v))
@@ -389,7 +389,7 @@
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfUint64(v))
return n, nil
@@ -397,12 +397,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeFixed32(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfInt32(int32(v)))
@@ -414,7 +414,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfInt32(int32(v)))
return n, nil
@@ -422,12 +422,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeFixed32(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfUint32(uint32(v)))
@@ -439,7 +439,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfUint32(uint32(v)))
return n, nil
@@ -447,12 +447,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeFixed32(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
@@ -464,7 +464,7 @@
}
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
return n, nil
@@ -472,12 +472,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeFixed64(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfInt64(int64(v)))
@@ -489,7 +489,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfInt64(int64(v)))
return n, nil
@@ -497,12 +497,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeFixed64(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfUint64(v))
@@ -514,7 +514,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfUint64(v))
return n, nil
@@ -522,12 +522,12 @@
if wtyp == protowire.BytesType {
buf, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
for len(buf) > 0 {
v, n := protowire.ConsumeFixed64(buf)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
buf = buf[n:]
list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
@@ -539,7 +539,7 @@
}
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
return n, nil
@@ -549,7 +549,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
return 0, errors.InvalidUTF8(string(fd.FullName()))
@@ -562,7 +562,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
return n, nil
@@ -572,7 +572,7 @@
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
m := list.NewElement()
if err := o.unmarshalMessage(v, m.Message()); err != nil {
@@ -586,7 +586,7 @@
}
v, n := protowire.ConsumeGroup(fd.Number(), b)
if n < 0 {
- return 0, protowire.ParseError(n)
+ return 0, errDecode
}
m := list.NewElement()
if err := o.unmarshalMessage(v, m.Message()); err != nil {
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index fa738a1..d18239c 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -5,12 +5,9 @@
package proto
import (
- "sort"
-
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
- "google.golang.org/protobuf/internal/fieldsort"
- "google.golang.org/protobuf/internal/mapsort"
+ "google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
@@ -74,19 +71,54 @@
// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
return out.Buf, err
}
// Marshal returns the wire-format encoding of m.
func (o MarshalOptions) Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
out, err := o.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
return out.Buf, err
}
+// emptyBytesForMessage returns a nil buffer if and only if m is invalid,
+// otherwise it returns a non-nil empty buffer.
+//
+// This is to assist the edge-case where user-code does the following:
+// m1.OptionalBytes, _ = proto.Marshal(m2)
+// where they expect the proto2 "optional_bytes" field to be populated
+// if and only if m2 is a valid message.
+func emptyBytesForMessage(m Message) []byte {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return nil
+ }
+ return emptyBuf[:]
+}
+
// MarshalAppend appends the wire-format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to append.
+ if m == nil {
+ return b, nil
+ }
+
out, err := o.marshal(b, m.ProtoReflect())
return out.Buf, err
}
@@ -99,6 +131,9 @@
return o.marshal(in.Buf, in.Message)
}
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) {
allowPartial := o.AllowPartial
o.AllowPartial = true
@@ -171,16 +206,17 @@
func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) {
if messageset.IsMessageSet(m.Descriptor()) {
- return marshalMessageSet(b, m, o)
+ return o.marshalMessageSet(b, m)
}
- // There are many choices for what order we visit fields in. The default one here
- // is chosen for reasonable efficiency and simplicity given the protoreflect API.
- // It is not deterministic, since Message.Range does not return fields in any
- // defined order.
- //
- // When using deterministic serialization, we sort the known fields.
+ fieldOrder := order.AnyFieldOrder
+ if o.Deterministic {
+ // TODO: This should use a more natural ordering like NumberFieldOrder,
+ // but doing so breaks golden tests that make invalid assumptions about
+ // output stability of this implementation.
+ fieldOrder = order.LegacyFieldOrder
+ }
var err error
- o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
b, err = o.marshalField(b, fd, v)
return err == nil
})
@@ -191,27 +227,6 @@
return b, nil
}
-// rangeFields visits fields in a defined order when deterministic serialization is enabled.
-func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- if !o.Deterministic {
- m.Range(f)
- return
- }
- var fds []protoreflect.FieldDescriptor
- m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- fds = append(fds, fd)
- return true
- })
- sort.Slice(fds, func(a, b int) bool {
- return fieldsort.Less(fds[a], fds[b])
- })
- for _, fd := range fds {
- if !f(fd, m.Get(fd)) {
- break
- }
- }
-}
-
func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
switch {
case fd.IsList():
@@ -254,8 +269,12 @@
func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) {
keyf := fd.MapKey()
valf := fd.MapValue()
+ keyOrder := order.AnyKeyOrder
+ if o.Deterministic {
+ keyOrder = order.GenericKeyOrder
+ }
var err error
- o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool {
b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
var pos int
b, pos = appendSpeculativeLength(b)
@@ -274,14 +293,6 @@
return b, err
}
-func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
- if !o.Deterministic {
- mapv.Range(f)
- return
- }
- mapsort.Range(mapv, kind, f)
-}
-
// When encoding length-prefixed fields, we speculatively set aside some number of bytes
// for the length, encode the data, and then encode the length (shifting the data if necessary
// to make room).
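
A sketch of the new Marshal edge cases: a nil message interface yields a nil buffer, a valid empty message yields a non-nil empty buffer, and Deterministic causes map entries to be emitted in sorted key order (structpb is assumed available and is used only because it has a map field):

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	b, err := proto.Marshal(nil) // nil message interface: nothing to output
	fmt.Println(b == nil, err)   // true <nil>

	empty, _ := proto.Marshal(&structpb.Struct{})
	fmt.Println(empty != nil, len(empty)) // true 0: valid empty message, non-nil empty buffer

	s, _ := structpb.NewStruct(map[string]interface{}{"a": 1.0, "b": 2.0})
	det := proto.MarshalOptions{Deterministic: true}
	b1, _ := det.Marshal(s)
	b2, _ := det.Marshal(s)
	fmt.Println(bytes.Equal(b1, b2)) // true: map entries in stable sorted key order
}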
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 10902bd..4dba2b9 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -111,18 +111,31 @@
// equalValue compares two singular values.
func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
- switch {
- case fd.Message() != nil:
- return equalMessage(x.Message(), y.Message())
- case fd.Kind() == pref.BytesKind:
- return bytes.Equal(x.Bytes(), y.Bytes())
- case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind:
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return x.Bool() == y.Bool()
+ case pref.EnumKind:
+ return x.Enum() == y.Enum()
+ case pref.Int32Kind, pref.Sint32Kind,
+ pref.Int64Kind, pref.Sint64Kind,
+ pref.Sfixed32Kind, pref.Sfixed64Kind:
+ return x.Int() == y.Int()
+ case pref.Uint32Kind, pref.Uint64Kind,
+ pref.Fixed32Kind, pref.Fixed64Kind:
+ return x.Uint() == y.Uint()
+ case pref.FloatKind, pref.DoubleKind:
fx := x.Float()
fy := y.Float()
if math.IsNaN(fx) || math.IsNaN(fy) {
return math.IsNaN(fx) && math.IsNaN(fy)
}
return fx == fy
+ case pref.StringKind:
+ return x.String() == y.String()
+ case pref.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case pref.MessageKind, pref.GroupKind:
+ return equalMessage(x.Message(), y.Message())
default:
return x.Interface() == y.Interface()
}
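
The kind-driven comparison keeps the NaN special case: proto.Equal treats two NaN float values as equal, unlike Go's == on floats. A short sketch (wrapperspb is assumed available):

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	x := wrapperspb.Double(math.NaN())
	y := wrapperspb.Double(math.NaN())
	fmt.Println(math.NaN() == math.NaN()) // false
	fmt.Println(proto.Equal(x, y))        // true
}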
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 73f2431..5f293cd 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -9,28 +9,84 @@
)
// HasExtension reports whether an extension field is populated.
-// It panics if ext does not extend m.
-func HasExtension(m Message, ext protoreflect.ExtensionType) bool {
- return m.ProtoReflect().Has(ext.TypeDescriptor())
+// It returns false if m is invalid or if xt does not extend m.
+func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
+ // Treat nil message interface as an empty message; no populated fields.
+ if m == nil {
+ return false
+ }
+
+ // As a special case, we report invalid or mismatching descriptors
+ // as always not being populated (since they aren't).
+ if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+ return false
+ }
+
+ return m.ProtoReflect().Has(xt.TypeDescriptor())
}
// ClearExtension clears an extension field such that subsequent
// HasExtension calls return false.
-// It panics if ext does not extend m.
-func ClearExtension(m Message, ext protoreflect.ExtensionType) {
- m.ProtoReflect().Clear(ext.TypeDescriptor())
+// It panics if m is invalid or if xt does not extend m.
+func ClearExtension(m Message, xt protoreflect.ExtensionType) {
+ m.ProtoReflect().Clear(xt.TypeDescriptor())
}
// GetExtension retrieves the value for an extension field.
// If the field is unpopulated, it returns the default value for
-// scalars and an immutable, empty value for lists, maps, or messages.
-// It panics if ext does not extend m.
-func GetExtension(m Message, ext protoreflect.ExtensionType) interface{} {
- return ext.InterfaceOf(m.ProtoReflect().Get(ext.TypeDescriptor()))
+// scalars and an immutable, empty value for lists or messages.
+// It panics if xt does not extend m.
+func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+ // Treat nil message interface as an empty message; return the default.
+ if m == nil {
+ return xt.InterfaceOf(xt.Zero())
+ }
+
+ return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor()))
}
// SetExtension stores the value of an extension field.
-// It panics if ext does not extend m or if value type is invalid for the field.
-func SetExtension(m Message, ext protoreflect.ExtensionType, value interface{}) {
- m.ProtoReflect().Set(ext.TypeDescriptor(), ext.ValueOf(value))
+// It panics if m is invalid, xt does not extend m, or if the type of v
+// is invalid for the specified extension field.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+ xd := xt.TypeDescriptor()
+ pv := xt.ValueOf(v)
+
+ // Specially treat an invalid list, map, or message as clear.
+ isValid := true
+ switch {
+ case xd.IsList():
+ isValid = pv.List().IsValid()
+ case xd.IsMap():
+ isValid = pv.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = pv.Message().IsValid()
+ }
+ if !isValid {
+ m.ProtoReflect().Clear(xd)
+ return
+ }
+
+ m.ProtoReflect().Set(xd, pv)
+}
+
+// RangeExtensions iterates over every populated extension field in m in an
+// undefined order, calling f for each extension type and value encountered.
+// It returns immediately if f returns false.
+// While iterating, mutating operations may only be performed
+// on the current extension field.
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+ // Treat nil message interface as an empty message; nothing to range over.
+ if m == nil {
+ return
+ }
+
+ m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor).Type()
+ vi := xt.InterfaceOf(v)
+ return f(xt, vi)
+ }
+ return true
+ })
}
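
A usage sketch of the reworked extension helpers, using the google.api.http extension that this same change updates elsewhere in the vendor tree:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.MethodOptions{}
	fmt.Println(proto.HasExtension(opts, annotations.E_Http)) // false
	fmt.Println(proto.HasExtension(nil, annotations.E_Http))  // false: a nil message is simply unpopulated

	proto.SetExtension(opts, annotations.E_Http, &annotations.HttpRule{Body: "*"})
	fmt.Println(proto.HasExtension(opts, annotations.E_Http)) // true

	rule := proto.GetExtension(opts, annotations.E_Http).(*annotations.HttpRule)
	fmt.Println(rule.GetBody()) // *

	proto.RangeExtensions(opts, func(xt protoreflect.ExtensionType, v interface{}) bool {
		fmt.Println(xt.TypeDescriptor().FullName()) // google.api.http
		return true
	})
}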
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index df72f98..d761ab3 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -5,6 +5,8 @@
package proto
import (
+ "fmt"
+
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
)
@@ -21,8 +23,14 @@
// It is semantically equivalent to unmarshaling the encoded form of src
// into dst with the UnmarshalOptions.Merge option specified.
func Merge(dst, src Message) {
+ // TODO: Should nil src be treated as semantically equivalent to an
+ // untyped, read-only, empty message? What about a nil dst?
+
dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect()
if dstMsg.Descriptor() != srcMsg.Descriptor() {
+ if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want))
+ }
panic("descriptor mismatch")
}
mergeOptions{}.mergeMessage(dstMsg, srcMsg)
@@ -69,7 +77,7 @@
}
if !dst.IsValid() {
- panic("cannot merge into invalid destination message")
+ panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName()))
}
src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
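
A small sketch of Merge with matching descriptors; a mismatch now panics with both full names in the message (descriptorpb assumed available):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	dst := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	src := &descriptorpb.FileDescriptorProto{Package: proto.String("pkg")}
	proto.Merge(dst, src)
	fmt.Println(dst.GetName(), dst.GetPackage()) // a.proto pkg
}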
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
index b6b3de5..312d5d4 100644
--- a/vendor/google.golang.org/protobuf/proto/messageset.go
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -9,28 +9,33 @@
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
-func sizeMessageSet(m protoreflect.Message) (size int) {
+func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) {
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
size += messageset.SizeField(fd.Number())
size += protowire.SizeTag(messageset.FieldMessage)
- size += protowire.SizeBytes(sizeMessage(v.Message()))
+ size += protowire.SizeBytes(o.size(v.Message()))
return true
})
size += messageset.SizeUnknown(m.GetUnknown())
return size
}
-func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]byte, error) {
+func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) {
if !flags.ProtoLegacy {
return b, errors.New("no support for message_set_wire_format")
}
+ fieldOrder := order.AnyFieldOrder
+ if o.Deterministic {
+ fieldOrder = order.NumberFieldOrder
+ }
var err error
- o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- b, err = marshalMessageSetField(b, fd, v, o)
+ order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = o.marshalMessageSetField(b, fd, v)
return err == nil
})
if err != nil {
@@ -39,7 +44,7 @@
return messageset.AppendUnknown(b, m.GetUnknown())
}
-func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value, o MarshalOptions) ([]byte, error) {
+func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
b = messageset.AppendFieldStart(b, fd.Number())
b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
@@ -51,12 +56,12 @@
return b, nil
}
-func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) error {
+func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error {
if !flags.ProtoLegacy {
return errors.New("no support for message_set_wire_format")
}
return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error {
- err := unmarshalMessageSetField(m, num, v, o)
+ err := o.unmarshalMessageSetField(m, num, v)
if err == errUnknown {
unknown := m.GetUnknown()
unknown = protowire.AppendTag(unknown, num, protowire.BytesType)
@@ -68,7 +73,7 @@
})
}
-func unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte, o UnmarshalOptions) error {
+func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error {
md := m.Descriptor()
if !md.ExtensionRanges().Has(num) {
return errUnknown
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
index ca14b09..1f0d183 100644
--- a/vendor/google.golang.org/protobuf/proto/proto.go
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -32,3 +32,12 @@
func init() {
Error = errors.Error
}
+
+// MessageName returns the full name of m.
+// If m is nil, it returns an empty string.
+func MessageName(m Message) protoreflect.FullName {
+ if m == nil {
+ return ""
+ }
+ return m.ProtoReflect().Descriptor().FullName()
+}
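
A sketch of the new MessageName helper (descriptorpb assumed available):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fmt.Println(proto.MessageName(&descriptorpb.FileDescriptorProto{})) // google.protobuf.FileDescriptorProto
	fmt.Println(proto.MessageName(nil) == "")                           // true
}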
diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go
index 22048bc..3d7f894 100644
--- a/vendor/google.golang.org/protobuf/proto/reset.go
+++ b/vendor/google.golang.org/protobuf/proto/reset.go
@@ -4,7 +4,11 @@
package proto
-import "google.golang.org/protobuf/reflect/protoreflect"
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
// Reset clears every field in the message.
// The resulting message shares no observable memory with its previous state
@@ -19,7 +23,7 @@
func resetMessage(m protoreflect.Message) {
if !m.IsValid() {
- panic("cannot reset invalid message")
+ panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName()))
}
// Clear all known fields.
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index a4e72bd..554b9c6 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -18,10 +18,18 @@
// Size returns the size in bytes of the wire-format encoding of m.
func (o MarshalOptions) Size(m Message) int {
- return sizeMessage(m.ProtoReflect())
+ // Treat a nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return 0
+ }
+
+ return o.size(m.ProtoReflect())
}
-func sizeMessage(m protoreflect.Message) (size int) {
+// size is a centralized function that all size operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for size that do not go through this.
+func (o MarshalOptions) size(m protoreflect.Message) (size int) {
methods := protoMethods(m)
if methods != nil && methods.Size != nil {
out := methods.Size(protoiface.SizeInput{
@@ -37,52 +45,52 @@
})
return len(out.Buf)
}
- return sizeMessageSlow(m)
+ return o.sizeMessageSlow(m)
}
-func sizeMessageSlow(m protoreflect.Message) (size int) {
+func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) {
if messageset.IsMessageSet(m.Descriptor()) {
- return sizeMessageSet(m)
+ return o.sizeMessageSet(m)
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- size += sizeField(fd, v)
+ size += o.sizeField(fd, v)
return true
})
size += len(m.GetUnknown())
return size
}
-func sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
+func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
num := fd.Number()
switch {
case fd.IsList():
- return sizeList(num, fd, value.List())
+ return o.sizeList(num, fd, value.List())
case fd.IsMap():
- return sizeMap(num, fd, value.Map())
+ return o.sizeMap(num, fd, value.Map())
default:
- return protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), value)
+ return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value)
}
}
-func sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
+func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
if fd.IsPacked() && list.Len() > 0 {
content := 0
for i, llen := 0, list.Len(); i < llen; i++ {
- content += sizeSingular(num, fd.Kind(), list.Get(i))
+ content += o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return protowire.SizeTag(num) + protowire.SizeBytes(content)
}
for i, llen := 0, list.Len(); i < llen; i++ {
- size += protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), list.Get(i))
+ size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return size
}
-func sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
+func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
size += protowire.SizeTag(num)
- size += protowire.SizeBytes(sizeField(fd.MapKey(), key.Value()) + sizeField(fd.MapValue(), value))
+ size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value))
return true
})
return size
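
Since Size now flows through MarshalOptions, it agrees with the length of the buffer produced under the same options, and a nil message sizes to 0. A short sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := &descriptorpb.FileDescriptorProto{Name: proto.String("test.proto")}
	b, _ := proto.Marshal(m)
	fmt.Println(proto.Size(m) == len(b)) // true
	fmt.Println(proto.Size(nil))         // 0
}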
diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go
index 1118460..3cf61a8 100644
--- a/vendor/google.golang.org/protobuf/proto/size_gen.go
+++ b/vendor/google.golang.org/protobuf/proto/size_gen.go
@@ -11,7 +11,7 @@
"google.golang.org/protobuf/reflect/protoreflect"
)
-func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
+func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
switch kind {
case protoreflect.BoolKind:
return protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
@@ -46,9 +46,9 @@
case protoreflect.BytesKind:
return protowire.SizeBytes(len(v.Bytes()))
case protoreflect.MessageKind:
- return protowire.SizeBytes(sizeMessage(v.Message()))
+ return protowire.SizeBytes(o.size(v.Message()))
case protoreflect.GroupKind:
- return protowire.SizeGroup(num, sizeMessage(v.Message()))
+ return protowire.SizeGroup(num, o.size(v.Message()))
default:
return 0
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
new file mode 100644
index 0000000..e4dfb12
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodesc provides functionality for converting
+// FileDescriptorProto messages to/from protoreflect.FileDescriptor values.
+//
+// The google.protobuf.FileDescriptorProto is a protobuf message that describes
+// the type information for a .proto file in a form that is easily serializable.
+// The protoreflect.FileDescriptor is a more structured representation of
+// the FileDescriptorProto message where references and remote dependencies
+// can be directly followed.
+package protodesc
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// Resolver is the resolver used by NewFile to resolve dependencies.
+// The enums and messages provided must belong to some parent file,
+// which is also registered.
+//
+// It is implemented by protoregistry.Files.
+type Resolver interface {
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
+}
+
+// FileOptions configures the construction of file descriptors.
+type FileOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowUnresolvable configures New to permissively allow unresolvable
+ // file, enum, or message dependencies. Unresolved dependencies are replaced
+ // by placeholder equivalents.
+ //
+ // The following dependencies may be left unresolved:
+ // • Resolving an imported file.
+ // • Resolving the type for a message field or extension field.
+ // If the kind of the field is unknown, then a placeholder is used for both
+ // the Enum and Message accessors on the protoreflect.FieldDescriptor.
+ // • Resolving an enum value set as the default for an optional enum field.
+ // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the
+ // first value in the associated enum (or zero if the enum dependency
+ // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue
+ // is populated with a placeholder.
+ // • Resolving the extended message type for an extension field.
+ // • Resolving the input or output message type for a service method.
+ //
+ // If the unresolved dependency uses a relative name,
+ // then the placeholder will contain an invalid FullName with a "*." prefix,
+ // indicating that the starting prefix of the full name is unknown.
+ AllowUnresolvable bool
+}
+
+// NewFile creates a new protoreflect.FileDescriptor from the provided
+// file descriptor message. See FileOptions.New for more information.
+func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
+ return FileOptions{}.New(fd, r)
+}
+
+// NewFiles creates a new protoregistry.Files from the provided
+// FileDescriptorSet message. See FileOptions.NewFiles for more information.
+func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
+ return FileOptions{}.NewFiles(fd)
+}
+
+// New creates a new protoreflect.FileDescriptor from the provided
+// file descriptor message. The file must represent a valid proto file according
+// to protobuf semantics. The returned descriptor is a deep copy of the input.
+//
+// Any imported files, enum types, or message types referenced in the file are
+// resolved using the provided registry. When looking up an import file path,
+// the path must be unique. The newly created file descriptor is not registered
+// back into the provided file registry.
+func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
+ if r == nil {
+ r = (*protoregistry.Files)(nil) // empty resolver
+ }
+
+ // Handle the file descriptor content.
+ f := &filedesc.File{L2: &filedesc.FileL2{}}
+ switch fd.GetSyntax() {
+ case "proto2", "":
+ f.L1.Syntax = protoreflect.Proto2
+ case "proto3":
+ f.L1.Syntax = protoreflect.Proto3
+ default:
+ return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
+ }
+ f.L1.Path = fd.GetName()
+ if f.L1.Path == "" {
+ return nil, errors.New("file path must be populated")
+ }
+ f.L1.Package = protoreflect.FullName(fd.GetPackage())
+ if !f.L1.Package.IsValid() && f.L1.Package != "" {
+ return nil, errors.New("invalid package: %q", f.L1.Package)
+ }
+ if opts := fd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.FileOptions)
+ f.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+
+ f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
+ for _, i := range fd.GetPublicDependency() {
+ if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic {
+ return nil, errors.New("invalid or duplicate public import index: %d", i)
+ }
+ f.L2.Imports[i].IsPublic = true
+ }
+ for _, i := range fd.GetWeakDependency() {
+ if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak {
+ return nil, errors.New("invalid or duplicate weak import index: %d", i)
+ }
+ f.L2.Imports[i].IsWeak = true
+ }
+ imps := importSet{f.Path(): true}
+ for i, path := range fd.GetDependency() {
+ imp := &f.L2.Imports[i]
+ f, err := r.FindFileByPath(path)
+ if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) {
+ f = filedesc.PlaceholderFile(path)
+ } else if err != nil {
+ return nil, errors.New("could not resolve import %q: %v", path, err)
+ }
+ imp.FileDescriptor = f
+
+ if imps[imp.Path()] {
+ return nil, errors.New("already imported %q", path)
+ }
+ imps[imp.Path()] = true
+ }
+ for i := range fd.GetDependency() {
+ imp := &f.L2.Imports[i]
+ imps.importPublic(imp.Imports())
+ }
+
+ // Handle source locations.
+ f.L2.Locations.File = f
+ for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
+ var l protoreflect.SourceLocation
+ // TODO: Validate that the path points to an actual declaration?
+ l.Path = protoreflect.SourcePath(loc.GetPath())
+ s := loc.GetSpan()
+ switch len(s) {
+ case 3:
+ l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2])
+ case 4:
+ l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3])
+ default:
+ return nil, errors.New("invalid span: %v", s)
+ }
+ // TODO: Validate that the span information is sensible?
+ // See https://github.com/protocolbuffers/protobuf/issues/6378.
+ if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 ||
+ (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) {
+ return nil, errors.New("invalid span: %v", s)
+ }
+ l.LeadingDetachedComments = loc.GetLeadingDetachedComments()
+ l.LeadingComments = loc.GetLeadingComments()
+ l.TrailingComments = loc.GetTrailingComments()
+ f.L2.Locations.List = append(f.L2.Locations.List, l)
+ }
+
+ // Step 1: Allocate and derive the names for all declarations.
+ // This copies all fields from the descriptor proto except:
+ // google.protobuf.FieldDescriptorProto.type_name
+ // google.protobuf.FieldDescriptorProto.default_value
+ // google.protobuf.FieldDescriptorProto.oneof_index
+ // google.protobuf.FieldDescriptorProto.extendee
+ // google.protobuf.MethodDescriptorProto.input
+ // google.protobuf.MethodDescriptorProto.output
+ var err error
+ sb := new(strs.Builder)
+ r1 := make(descsByName)
+ if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil {
+ return nil, err
+ }
+ if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil {
+ return nil, err
+ }
+ if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil {
+ return nil, err
+ }
+ if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil {
+ return nil, err
+ }
+
+ // Step 2: Resolve every dependency reference not handled by step 1.
+ r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable}
+ if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil {
+ return nil, err
+ }
+ if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil {
+ return nil, err
+ }
+ if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil {
+ return nil, err
+ }
+
+ // Step 3: Validate every enum, message, and extension declaration.
+ if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil {
+ return nil, err
+ }
+ if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil {
+ return nil, err
+ }
+ if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
+
+type importSet map[string]bool
+
+func (is importSet) importPublic(imps protoreflect.FileImports) {
+ for i := 0; i < imps.Len(); i++ {
+ if imp := imps.Get(i); imp.IsPublic {
+ is[imp.Path()] = true
+ is.importPublic(imp.Imports())
+ }
+ }
+}
+
+// NewFiles creates a new protoregistry.Files from the provided
+// FileDescriptorSet message. The descriptor set must include only
+// valid files according to protobuf semantics. The returned descriptors
+// are a deep copy of the input.
+func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
+ files := make(map[string]*descriptorpb.FileDescriptorProto)
+ for _, fd := range fds.File {
+ if _, ok := files[fd.GetName()]; ok {
+ return nil, errors.New("file appears multiple times: %q", fd.GetName())
+ }
+ files[fd.GetName()] = fd
+ }
+ r := &protoregistry.Files{}
+ for _, fd := range files {
+ if err := o.addFileDeps(r, fd, files); err != nil {
+ return nil, err
+ }
+ }
+ return r, nil
+}
+func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error {
+ // Set the entry to nil while descending into a file's dependencies to detect cycles.
+ files[fd.GetName()] = nil
+ for _, dep := range fd.Dependency {
+ depfd, ok := files[dep]
+ if depfd == nil {
+ if ok {
+ return errors.New("import cycle in file: %q", dep)
+ }
+ continue
+ }
+ if err := o.addFileDeps(r, depfd, files); err != nil {
+ return err
+ }
+ }
+ // Delete the entry once dependencies are processed.
+ delete(files, fd.GetName())
+ f, err := o.New(fd, r)
+ if err != nil {
+ return err
+ }
+ return r.RegisterFile(f)
+}
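
For reference, a minimal sketch of driving the FileOptions.NewFiles entry point added above from a serialized FileDescriptorSet (for example, one produced by `protoc --descriptor_set_out`). The path example.binpb is a hypothetical placeholder, and AllowUnresolvable is optional:

package main

import (
	"fmt"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Read a serialized FileDescriptorSet (hypothetical path).
	raw, err := os.ReadFile("example.binpb")
	if err != nil {
		panic(err)
	}
	fds := &descriptorpb.FileDescriptorSet{}
	if err := proto.Unmarshal(raw, fds); err != nil {
		panic(err)
	}
	// AllowUnresolvable substitutes placeholders for missing dependencies
	// instead of failing.
	files, err := protodesc.FileOptions{AllowUnresolvable: true}.NewFiles(fds)
	if err != nil {
		panic(err)
	}
	fmt.Println("registered files:", files.NumFiles())
}
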
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
new file mode 100644
index 0000000..37efda1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -0,0 +1,248 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+type descsByName map[protoreflect.FullName]protoreflect.Descriptor
+
+func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) {
+ es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers
+ for i, ed := range eds {
+ e := &es[i]
+ e.L2 = new(filedesc.EnumL2)
+ if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := ed.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.EnumOptions)
+ e.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ for _, s := range ed.GetReservedName() {
+ e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
+ }
+ for _, rr := range ed.GetReservedRange() {
+ e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{
+ protoreflect.EnumNumber(rr.GetStart()),
+ protoreflect.EnumNumber(rr.GetEnd()),
+ })
+ }
+ if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil {
+ return nil, err
+ }
+ }
+ return es, nil
+}
+
+func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) {
+ vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers
+ for i, vd := range vds {
+ v := &vs[i]
+ if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := vd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions)
+ v.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ v.L1.Number = protoreflect.EnumNumber(vd.GetNumber())
+ }
+ return vs, nil
+}
+
+func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) {
+ ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers
+ for i, md := range mds {
+ m := &ms[i]
+ m.L2 = new(filedesc.MessageL2)
+ if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := md.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
+ m.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ m.L1.IsMapEntry = opts.GetMapEntry()
+ m.L1.IsMessageSet = opts.GetMessageSetWireFormat()
+ }
+ for _, s := range md.GetReservedName() {
+ m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s))
+ }
+ for _, rr := range md.GetReservedRange() {
+ m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{
+ protoreflect.FieldNumber(rr.GetStart()),
+ protoreflect.FieldNumber(rr.GetEnd()),
+ })
+ }
+ for _, xr := range md.GetExtensionRange() {
+ m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
+ protoreflect.FieldNumber(xr.GetStart()),
+ protoreflect.FieldNumber(xr.GetEnd()),
+ })
+ var optsFunc func() protoreflect.ProtoMessage
+ if opts := xr.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions)
+ optsFunc = func() protoreflect.ProtoMessage { return opts }
+ }
+ m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc)
+ }
+ if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil {
+ return nil, err
+ }
+ }
+ return ms, nil
+}
+
+func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) {
+ fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers
+ for i, fd := range fds {
+ f := &fs[i]
+ if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ f.L1.IsProto3Optional = fd.GetProto3Optional()
+ if opts := fd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
+ f.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ f.L1.IsWeak = opts.GetWeak()
+ f.L1.HasPacked = opts.Packed != nil
+ f.L1.IsPacked = opts.GetPacked()
+ }
+ f.L1.Number = protoreflect.FieldNumber(fd.GetNumber())
+ f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel())
+ if fd.Type != nil {
+ f.L1.Kind = protoreflect.Kind(fd.GetType())
+ }
+ if fd.JsonName != nil {
+ f.L1.StringName.InitJSON(fd.GetJsonName())
+ }
+ }
+ return fs, nil
+}
+
+func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) {
+ os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers
+ for i, od := range ods {
+ o := &os[i]
+ if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := od.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.OneofOptions)
+ o.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ }
+ return os, nil
+}
+
+func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) {
+ xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers
+ for i, xd := range xds {
+ x := &xs[i]
+ x.L2 = new(filedesc.ExtensionL2)
+ if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := xd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
+ x.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ x.L2.IsPacked = opts.GetPacked()
+ }
+ x.L1.Number = protoreflect.FieldNumber(xd.GetNumber())
+ x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel())
+ if xd.Type != nil {
+ x.L1.Kind = protoreflect.Kind(xd.GetType())
+ }
+ if xd.JsonName != nil {
+ x.L2.StringName.InitJSON(xd.GetJsonName())
+ }
+ }
+ return xs, nil
+}
+
+func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) {
+ ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers
+ for i, sd := range sds {
+ s := &ss[i]
+ s.L2 = new(filedesc.ServiceL2)
+ if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := sd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.ServiceOptions)
+ s.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil {
+ return nil, err
+ }
+ }
+ return ss, nil
+}
+
+func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) {
+ ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers
+ for i, md := range mds {
+ m := &ms[i]
+ if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := md.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.MethodOptions)
+ m.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ m.L1.IsStreamingClient = md.GetClientStreaming()
+ m.L1.IsStreamingServer = md.GetServerStreaming()
+ }
+ return ms, nil
+}
+
+func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) {
+ if !protoreflect.Name(name).IsValid() {
+ return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name)
+ }
+
+ // Derive the full name of the child.
+ // Note that enum values are a sibling to the enum parent in the namespace.
+ var fullName protoreflect.FullName
+ if _, ok := parent.(protoreflect.EnumDescriptor); ok {
+ fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name))
+ } else {
+ fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name))
+ }
+ if _, ok := r[fullName]; ok {
+ return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName)
+ }
+ r[fullName] = child
+
+ // TODO: Verify that the full name does not already exist in the resolver?
+ // This is not as critical since most usages of NewFile will register
+ // the created file back into the registry, which will perform this check.
+
+ return filedesc.BaseL0{
+ FullName: fullName,
+ ParentFile: parent.ParentFile().(*filedesc.File),
+ Parent: parent,
+ Index: idx,
+ }, nil
+}
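
A small illustration of the namespacing rule noted in makeBase above: enum values are declared as siblings of their enum in the proto namespace. This can be observed on any generated enum, e.g. descriptorpb's own FieldDescriptorProto.Type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// The Type enum is nested inside FieldDescriptorProto, but its values are
	// placed next to the enum itself in the proto namespace.
	ed := descriptorpb.FieldDescriptorProto_TYPE_DOUBLE.Descriptor()
	fmt.Println(ed.FullName())                 // google.protobuf.FieldDescriptorProto.Type
	fmt.Println(ed.Values().Get(0).FullName()) // google.protobuf.FieldDescriptorProto.TYPE_DOUBLE
}
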
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
new file mode 100644
index 0000000..cebb36c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -0,0 +1,286 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// resolver is a wrapper around a local registry of declarations within the file
+// and the remote resolver. The remote resolver is restricted to only return
+// descriptors that have been imported.
+type resolver struct {
+ local descsByName
+ remote Resolver
+ imports importSet
+
+ allowUnresolvable bool
+}
+
+func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) {
+ for i, md := range mds {
+ m := &ms[i]
+ for j, fd := range md.GetField() {
+ f := &m.L2.Fields.List[j]
+ if f.L1.Cardinality == protoreflect.Required {
+ m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number)
+ }
+ if fd.OneofIndex != nil {
+ k := int(fd.GetOneofIndex())
+ if !(0 <= k && k < len(md.GetOneofDecl())) {
+ return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k)
+ }
+ o := &m.L2.Oneofs.List[k]
+ f.L1.ContainingOneof = o
+ o.L1.Fields.List = append(o.L1.Fields.List, f)
+ }
+
+ if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
+ return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
+ }
+ if fd.DefaultValue != nil {
+ v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
+ if err != nil {
+ return errors.New("message field %q has invalid default: %v", f.FullName(), err)
+ }
+ f.L1.Default = filedesc.DefaultValue(v, ev)
+ }
+ }
+
+ if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil {
+ return err
+ }
+ if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
+ for i, xd := range xds {
+ x := &xs[i]
+ if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil {
+ return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
+ }
+ if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil {
+ return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
+ }
+ if xd.DefaultValue != nil {
+ v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable)
+ if err != nil {
+ return errors.New("extension field %q has invalid default: %v", x.FullName(), err)
+ }
+ x.L2.Default = filedesc.DefaultValue(v, ev)
+ }
+ }
+ return nil
+}
+
+func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) {
+ for i, sd := range sds {
+ s := &ss[i]
+ for j, md := range sd.GetMethod() {
+ m := &s.L2.Methods.List[j]
+ m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+ if err != nil {
+ return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
+ }
+ m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+ if err != nil {
+ return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
+ }
+ }
+ }
+ return nil
+}
+
+// findTarget finds an enum or message descriptor if k is an enum, message,
+// group, or unknown. If unknown, and the name could be resolved, the returned
+// kind is set based on the type of the resolved descriptor.
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+ switch k {
+ case protoreflect.EnumKind:
+ ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ return k, ed, nil, nil
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ md, err := r.findMessageDescriptor(scope, ref, isWeak)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ return k, nil, md, nil
+ case 0:
+ // Handle unspecified kinds (possible with parsers that operate
+ // on a per-file basis without knowledge of dependencies).
+ d, err := r.findDescriptor(scope, ref)
+ if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
+ } else if err == protoregistry.NotFound {
+ return 0, nil, nil, errors.New("%q not found", ref.FullName())
+ } else if err != nil {
+ return 0, nil, nil, err
+ }
+ switch d := d.(type) {
+ case protoreflect.EnumDescriptor:
+ return protoreflect.EnumKind, d, nil, nil
+ case protoreflect.MessageDescriptor:
+ return protoreflect.MessageKind, nil, d, nil
+ default:
+ return 0, nil, nil, errors.New("unknown kind")
+ }
+ default:
+ if ref != "" {
+ return 0, nil, nil, errors.New("target name cannot be specified for %v", k)
+ }
+ if !k.IsValid() {
+ return 0, nil, nil, errors.New("invalid kind: %d", k)
+ }
+ return k, nil, nil, nil
+ }
+}
+
+// findDescriptor finds the descriptor by name,
+// which may be a relative name within some scope.
+//
+// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar",
+// then the following full names are searched:
+// * fizz.buzz.Foo.Bar
+// * fizz.Foo.Bar
+// * Foo.Bar
+func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) {
+ if !ref.IsValid() {
+ return nil, errors.New("invalid name reference: %q", ref)
+ }
+ if ref.IsFull() {
+ scope, ref = "", ref[1:]
+ }
+ var foundButNotImported protoreflect.Descriptor
+ for {
+ // Derive the full name to search.
+ s := protoreflect.FullName(ref)
+ if scope != "" {
+ s = scope + "." + s
+ }
+
+ // Check the current file for the descriptor.
+ if d, ok := r.local[s]; ok {
+ return d, nil
+ }
+
+ // Check the remote registry for the descriptor.
+ d, err := r.remote.FindDescriptorByName(s)
+ if err == nil {
+ // Only allow descriptors covered by one of the imports.
+ if r.imports[d.ParentFile().Path()] {
+ return d, nil
+ }
+ foundButNotImported = d
+ } else if err != protoregistry.NotFound {
+ return nil, errors.Wrap(err, "%q", s)
+ }
+
+ // Continue on at a higher level of scoping.
+ if scope == "" {
+ if d := foundButNotImported; d != nil {
+ return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path())
+ }
+ return nil, protoregistry.NotFound
+ }
+ scope = scope.Parent()
+ }
+}
+
+func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) {
+ d, err := r.findDescriptor(scope, ref)
+ if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ return filedesc.PlaceholderEnum(ref.FullName()), nil
+ } else if err == protoregistry.NotFound {
+ return nil, errors.New("%q not found", ref.FullName())
+ } else if err != nil {
+ return nil, err
+ }
+ ed, ok := d.(protoreflect.EnumDescriptor)
+ if !ok {
+ return nil, errors.New("resolved %q, but it is not an enum", d.FullName())
+ }
+ return ed, nil
+}
+
+func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) {
+ d, err := r.findDescriptor(scope, ref)
+ if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ return filedesc.PlaceholderMessage(ref.FullName()), nil
+ } else if err == protoregistry.NotFound {
+ return nil, errors.New("%q not found", ref.FullName())
+ } else if err != nil {
+ return nil, err
+ }
+ md, ok := d.(protoreflect.MessageDescriptor)
+ if !ok {
+ return nil, errors.New("resolved %q, but it is not an message", d.FullName())
+ }
+ return md, nil
+}
+
+// partialName is a name that may be fully qualified or relative.
+// A leading dot means that the name is full,
+// otherwise the name is relative to some current scope.
+// See google.protobuf.FieldDescriptorProto.type_name.
+type partialName string
+
+func (s partialName) IsFull() bool {
+ return len(s) > 0 && s[0] == '.'
+}
+
+func (s partialName) IsValid() bool {
+ if s.IsFull() {
+ return protoreflect.FullName(s[1:]).IsValid()
+ }
+ return protoreflect.FullName(s).IsValid()
+}
+
+const unknownPrefix = "*."
+
+// FullName converts the partial name to a full name on a best-effort basis.
+// If relative, it creates an invalid full name, using a "*." prefix
+// to indicate that the start of the full name is unknown.
+func (s partialName) FullName() protoreflect.FullName {
+ if s.IsFull() {
+ return protoreflect.FullName(s[1:])
+ }
+ return protoreflect.FullName(unknownPrefix + s)
+}
+
+func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) {
+ var evs protoreflect.EnumValueDescriptors
+ if fd.Enum() != nil {
+ evs = fd.Enum().Values()
+ }
+ v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor)
+ if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() {
+ v = protoreflect.ValueOfEnum(0)
+ if evs.Len() > 0 {
+ v = protoreflect.ValueOfEnum(evs.Get(0).Number())
+ }
+ ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s)))
+ } else if err != nil {
+ return v, ev, err
+ }
+ if fd.Syntax() == protoreflect.Proto3 {
+ return v, ev, errors.New("cannot be specified under proto3 semantics")
+ }
+ if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated {
+ return v, ev, errors.New("cannot be specified on composite types")
+ }
+ return v, ev, nil
+}
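
The scope-walking order documented on findDescriptor can be sketched on its own. The standalone helper below (candidateNames is an illustrative name, not part of the package) just enumerates the candidate full names in the order they would be tried:

package main

import (
	"fmt"
	"strings"
)

// candidateNames lists the full names tried for a relative reference within a
// scope, from the innermost scope outwards, ending with the bare reference.
func candidateNames(scope, ref string) []string {
	var out []string
	for {
		if scope == "" {
			out = append(out, ref)
			return out
		}
		out = append(out, scope+"."+ref)
		if i := strings.LastIndex(scope, "."); i >= 0 {
			scope = scope[:i]
		} else {
			scope = ""
		}
	}
}

func main() {
	fmt.Println(candidateNames("fizz.buzz", "Foo.Bar"))
	// [fizz.buzz.Foo.Bar fizz.Foo.Bar Foo.Bar]
}
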
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
new file mode 100644
index 0000000..9af1d56
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -0,0 +1,374 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "strings"
+ "unicode"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error {
+ for i, ed := range eds {
+ e := &es[i]
+ if err := e.L2.ReservedNames.CheckValid(); err != nil {
+ return errors.New("enum %q reserved names has %v", e.FullName(), err)
+ }
+ if err := e.L2.ReservedRanges.CheckValid(); err != nil {
+ return errors.New("enum %q reserved ranges has %v", e.FullName(), err)
+ }
+ if len(ed.GetValue()) == 0 {
+ return errors.New("enum %q must contain at least one value declaration", e.FullName())
+ }
+ allowAlias := ed.GetOptions().GetAllowAlias()
+ foundAlias := false
+ for i := 0; i < e.Values().Len(); i++ {
+ v1 := e.Values().Get(i)
+ if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 {
+ foundAlias = true
+ if !allowAlias {
+ return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name())
+ }
+ }
+ }
+ if allowAlias && !foundAlias {
+ return errors.New("enum %q allows aliases, but none were found", e.FullName())
+ }
+ if e.Syntax() == protoreflect.Proto3 {
+ if v := e.Values().Get(0); v.Number() != 0 {
+ return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName())
+ }
+ // Verify that value names in proto3 do not conflict if the
+ // case-insensitive prefix is removed.
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055
+ names := map[string]protoreflect.EnumValueDescriptor{}
+ prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1)
+ for i := 0; i < e.Values().Len(); i++ {
+ v1 := e.Values().Get(i)
+ s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix))
+ if v2, ok := names[s]; ok && v1.Number() != v2.Number() {
+ return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
+ }
+ names[s] = v1
+ }
+ }
+
+ for j, vd := range ed.GetValue() {
+ v := &e.L2.Values.List[j]
+ if vd.Number == nil {
+ return errors.New("enum value %q must have a specified number", v.FullName())
+ }
+ if e.L2.ReservedNames.Has(v.Name()) {
+ return errors.New("enum value %q must not use reserved name", v.FullName())
+ }
+ if e.L2.ReservedRanges.Has(v.Number()) {
+ return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number())
+ }
+ }
+ }
+ return nil
+}
+
+func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
+ for i, md := range mds {
+ m := &ms[i]
+
+ // Handle the message descriptor itself.
+ isMessageSet := md.GetOptions().GetMessageSetWireFormat()
+ if err := m.L2.ReservedNames.CheckValid(); err != nil {
+ return errors.New("message %q reserved names has %v", m.FullName(), err)
+ }
+ if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil {
+ return errors.New("message %q reserved ranges has %v", m.FullName(), err)
+ }
+ if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil {
+ return errors.New("message %q extension ranges has %v", m.FullName(), err)
+ }
+ if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil {
+ return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err)
+ }
+ for i := 0; i < m.Fields().Len(); i++ {
+ f1 := m.Fields().Get(i)
+ if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 {
+ return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name())
+ }
+ }
+ if isMessageSet && !flags.ProtoLegacy {
+ return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName())
+ }
+ if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
+ return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
+ }
+ if m.Syntax() == protoreflect.Proto3 {
+ if m.ExtensionRanges().Len() > 0 {
+ return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
+ }
+ // Verify that field names in proto3 do not conflict if lowercased
+ // with all underscores removed.
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847
+ names := map[string]protoreflect.FieldDescriptor{}
+ for i := 0; i < m.Fields().Len(); i++ {
+ f1 := m.Fields().Get(i)
+ s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1)
+ if f2, ok := names[s]; ok {
+ return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name())
+ }
+ names[s] = f1
+ }
+ }
+
+ for j, fd := range md.GetField() {
+ f := &m.L2.Fields.List[j]
+ if m.L2.ReservedNames.Has(f.Name()) {
+ return errors.New("message field %q must not use reserved name", f.FullName())
+ }
+ if !f.Number().IsValid() {
+ return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number())
+ }
+ if !f.Cardinality().IsValid() {
+ return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality())
+ }
+ if m.L2.ReservedRanges.Has(f.Number()) {
+ return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number())
+ }
+ if m.L2.ExtensionRanges.Has(f.Number()) {
+ return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number())
+ }
+ if fd.Extendee != nil {
+ return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee())
+ }
+ if f.L1.IsProto3Optional {
+ if f.Syntax() != protoreflect.Proto3 {
+ return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName())
+ }
+ if f.Cardinality() != protoreflect.Optional {
+ return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName())
+ }
+ if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 {
+ return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
+ }
+ }
+ if f.IsWeak() && !flags.ProtoLegacy {
+ return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
+ }
+ if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
+ return errors.New("message field %q may only be weak for an optional message", f.FullName())
+ }
+ if f.IsPacked() && !isPackable(f) {
+ return errors.New("message field %q is not packable", f.FullName())
+ }
+ if err := checkValidGroup(f); err != nil {
+ return errors.New("message field %q is an invalid group: %v", f.FullName(), err)
+ }
+ if err := checkValidMap(f); err != nil {
+ return errors.New("message field %q is an invalid map: %v", f.FullName(), err)
+ }
+ if f.Syntax() == protoreflect.Proto3 {
+ if f.Cardinality() == protoreflect.Required {
+ return errors.New("message field %q using proto3 semantics cannot be required", f.FullName())
+ }
+ if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 {
+ return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName())
+ }
+ }
+ }
+ seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs
+ for j := range md.GetOneofDecl() {
+ o := &m.L2.Oneofs.List[j]
+ if o.Fields().Len() == 0 {
+ return errors.New("message oneof %q must contain at least one field declaration", o.FullName())
+ }
+ if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) {
+ return errors.New("message oneof %q must have consecutively declared fields", o.FullName())
+ }
+
+ if o.IsSynthetic() {
+ seenSynthetic = true
+ continue
+ }
+ if !o.IsSynthetic() && seenSynthetic {
+ return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName())
+ }
+
+ for i := 0; i < o.Fields().Len(); i++ {
+ f := o.Fields().Get(i)
+ if f.Cardinality() != protoreflect.Optional {
+ return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
+ }
+ if f.IsWeak() {
+ return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName())
+ }
+ }
+ }
+
+ if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil {
+ return err
+ }
+ if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil {
+ return err
+ }
+ if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
+ for i, xd := range xds {
+ x := &xs[i]
+ // NOTE: Avoid using the IsValid method since extensions to MessageSet
+ // may have a field number higher than normal. This check only verifies
+ // that the number is not negative or reserved. We check again later
+ // if we know that the extendee is definitely not a MessageSet.
+ if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) {
+ return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
+ }
+ if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required {
+ return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality())
+ }
+ if xd.JsonName != nil {
+ // A bug in older versions of protoc would always populate the
+ // "json_name" option for extensions when it is meaningless.
+ // When it did so, it would always use the camel-cased field name.
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) {
+ return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName())
+ }
+ }
+ if xd.OneofIndex != nil {
+ return errors.New("extension field %q may not be part of a oneof", x.FullName())
+ }
+ if md := x.ContainingMessage(); !md.IsPlaceholder() {
+ if !md.ExtensionRanges().Has(x.Number()) {
+ return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number())
+ }
+ isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat()
+ if isMessageSet && !isOptionalMessage(x) {
+ return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName())
+ }
+ if !isMessageSet && !x.Number().IsValid() {
+ return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
+ }
+ }
+ if xd.GetOptions().GetWeak() {
+ return errors.New("extension field %q cannot be a weak reference", x.FullName())
+ }
+ if x.IsPacked() && !isPackable(x) {
+ return errors.New("extension field %q is not packable", x.FullName())
+ }
+ if err := checkValidGroup(x); err != nil {
+ return errors.New("extension field %q is an invalid group: %v", x.FullName(), err)
+ }
+ if md := x.Message(); md != nil && md.IsMapEntry() {
+ return errors.New("extension field %q cannot be a map entry", x.FullName())
+ }
+ if x.Syntax() == protoreflect.Proto3 {
+ switch x.ContainingMessage().FullName() {
+ case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName():
+ default:
+ return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName())
+ }
+ }
+ }
+ return nil
+}
+
+// isOptionalMessage reports whether this is an optional message.
+// If the kind is unknown, it is assumed to be a message.
+func isOptionalMessage(fd protoreflect.FieldDescriptor) bool {
+ return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional
+}
+
+// isPackable checks whether the pack option can be specified.
+func isPackable(fd protoreflect.FieldDescriptor) bool {
+ switch fd.Kind() {
+ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
+ return false
+ }
+ return fd.IsList()
+}
+
+// checkValidGroup checks whether fd is a valid group according to the same
+// rules that protoc imposes.
+func checkValidGroup(fd protoreflect.FieldDescriptor) error {
+ md := fd.Message()
+ switch {
+ case fd.Kind() != protoreflect.GroupKind:
+ return nil
+ case fd.Syntax() != protoreflect.Proto2:
+ return errors.New("invalid under proto2 semantics")
+ case md == nil || md.IsPlaceholder():
+ return errors.New("message must be resolvable")
+ case fd.FullName().Parent() != md.FullName().Parent():
+ return errors.New("message and field must be declared in the same scope")
+ case !unicode.IsUpper(rune(md.Name()[0])):
+ return errors.New("message name must start with an uppercase")
+ case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
+ return errors.New("field name must be lowercased form of the message name")
+ }
+ return nil
+}
+
+// checkValidMap checks whether the field is a valid map according to the same
+// rules that protoc imposes.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115
+func checkValidMap(fd protoreflect.FieldDescriptor) error {
+ md := fd.Message()
+ switch {
+ case md == nil || !md.IsMapEntry():
+ return nil
+ case fd.FullName().Parent() != md.FullName().Parent():
+ return errors.New("message and field must be declared in the same scope")
+ case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))):
+ return errors.New("incorrect implicit map entry name")
+ case fd.Cardinality() != protoreflect.Repeated:
+ return errors.New("field must be repeated")
+ case md.Fields().Len() != 2:
+ return errors.New("message must have exactly two fields")
+ case md.ExtensionRanges().Len() > 0:
+ return errors.New("message must not have any extension ranges")
+ case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0:
+ return errors.New("message must not have any nested declarations")
+ }
+ kf := md.Fields().Get(0)
+ vf := md.Fields().Get(1)
+ switch {
+ case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault():
+ return errors.New("invalid key field")
+ case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault():
+ return errors.New("invalid value field")
+ }
+ switch kf.Kind() {
+ case protoreflect.BoolKind: // bool
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64
+ case protoreflect.StringKind: // string
+ default:
+ return errors.New("invalid key kind: %v", kf.Kind())
+ }
+ if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 {
+ return errors.New("map enum value must have zero number for the first value")
+ }
+ return nil
+}
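
The proto3 field-name conflict rule in validateMessageDeclarations compares names after lowercasing and removing underscores; a tiny sketch of that comparison:

package main

import (
	"fmt"
	"strings"
)

// canonical mirrors the proto3 conflict check: lowercase the field name and
// strip underscores before comparing.
func canonical(name string) string {
	return strings.Replace(strings.ToLower(name), "_", "", -1)
}

func main() {
	// These two names would conflict under proto3 semantics.
	fmt.Println(canonical("foo_bar") == canonical("FooBar")) // true
}
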
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
new file mode 100644
index 0000000..a7c5cef
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -0,0 +1,252 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
+// google.protobuf.FileDescriptorProto message.
+func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
+ p := &descriptorpb.FileDescriptorProto{
+ Name: proto.String(file.Path()),
+ Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions),
+ }
+ if file.Package() != "" {
+ p.Package = proto.String(string(file.Package()))
+ }
+ for i, imports := 0, file.Imports(); i < imports.Len(); i++ {
+ imp := imports.Get(i)
+ p.Dependency = append(p.Dependency, imp.Path())
+ if imp.IsPublic {
+ p.PublicDependency = append(p.PublicDependency, int32(i))
+ }
+ if imp.IsWeak {
+ p.WeakDependency = append(p.WeakDependency, int32(i))
+ }
+ }
+ for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
+ loc := locs.Get(i)
+ l := &descriptorpb.SourceCodeInfo_Location{}
+ l.Path = append(l.Path, loc.Path...)
+ if loc.StartLine == loc.EndLine {
+ l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)}
+ } else {
+ l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)}
+ }
+ l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...)
+ if loc.LeadingComments != "" {
+ l.LeadingComments = proto.String(loc.LeadingComments)
+ }
+ if loc.TrailingComments != "" {
+ l.TrailingComments = proto.String(loc.TrailingComments)
+ }
+ if p.SourceCodeInfo == nil {
+ p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{}
+ }
+ p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l)
+
+ }
+ for i, messages := 0, file.Messages(); i < messages.Len(); i++ {
+ p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i)))
+ }
+ for i, enums := 0, file.Enums(); i < enums.Len(); i++ {
+ p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))
+ }
+ for i, services := 0, file.Services(); i < services.Len(); i++ {
+ p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i)))
+ }
+ for i, exts := 0, file.Extensions(); i < exts.Len(); i++ {
+ p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
+ }
+ if syntax := file.Syntax(); syntax != protoreflect.Proto2 {
+ p.Syntax = proto.String(file.Syntax().String())
+ }
+ return p
+}
+
+// ToDescriptorProto copies a protoreflect.MessageDescriptor into a
+// google.protobuf.DescriptorProto message.
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
+ p := &descriptorpb.DescriptorProto{
+ Name: proto.String(string(message.Name())),
+ Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions),
+ }
+ for i, fields := 0, message.Fields(); i < fields.Len(); i++ {
+ p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i)))
+ }
+ for i, exts := 0, message.Extensions(); i < exts.Len(); i++ {
+ p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
+ }
+ for i, messages := 0, message.Messages(); i < messages.Len(); i++ {
+ p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i)))
+ }
+ for i, enums := 0, message.Enums(); i < enums.Len(); i++ {
+ p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))
+ }
+ for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ {
+ xrange := xranges.Get(i)
+ p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{
+ Start: proto.Int32(int32(xrange[0])),
+ End: proto.Int32(int32(xrange[1])),
+ Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions),
+ })
+ }
+ for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ {
+ p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i)))
+ }
+ for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ {
+ rrange := ranges.Get(i)
+ p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{
+ Start: proto.Int32(int32(rrange[0])),
+ End: proto.Int32(int32(rrange[1])),
+ })
+ }
+ for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
+ p.ReservedName = append(p.ReservedName, string(names.Get(i)))
+ }
+ return p
+}
+
+// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a
+// google.protobuf.FieldDescriptorProto message.
+func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
+ p := &descriptorpb.FieldDescriptorProto{
+ Name: proto.String(string(field.Name())),
+ Number: proto.Int32(int32(field.Number())),
+ Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(),
+ Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions),
+ }
+ if field.IsExtension() {
+ p.Extendee = fullNameOf(field.ContainingMessage())
+ }
+ if field.Kind().IsValid() {
+ p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum()
+ }
+ if field.Enum() != nil {
+ p.TypeName = fullNameOf(field.Enum())
+ }
+ if field.Message() != nil {
+ p.TypeName = fullNameOf(field.Message())
+ }
+ if field.HasJSONName() {
+ // A bug in older versions of protoc would always populate the
+ // "json_name" option for extensions when it is meaningless.
+ // When it did so, it would always use the camel-cased field name.
+ if field.IsExtension() {
+ p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name())))
+ } else {
+ p.JsonName = proto.String(field.JSONName())
+ }
+ }
+ if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
+ p.Proto3Optional = proto.Bool(true)
+ }
+ if field.HasDefault() {
+ def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor)
+ if err != nil && field.DefaultEnumValue() != nil {
+ def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values
+ } else if err != nil {
+ panic(fmt.Sprintf("%v: %v", field.FullName(), err))
+ }
+ p.DefaultValue = proto.String(def)
+ }
+ if oneof := field.ContainingOneof(); oneof != nil {
+ p.OneofIndex = proto.Int32(int32(oneof.Index()))
+ }
+ return p
+}
+
+// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a
+// google.protobuf.OneofDescriptorProto message.
+func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
+ return &descriptorpb.OneofDescriptorProto{
+ Name: proto.String(string(oneof.Name())),
+ Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions),
+ }
+}
+
+// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a
+// google.protobuf.EnumDescriptorProto message.
+func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
+ p := &descriptorpb.EnumDescriptorProto{
+ Name: proto.String(string(enum.Name())),
+ Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions),
+ }
+ for i, values := 0, enum.Values(); i < values.Len(); i++ {
+ p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i)))
+ }
+ for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ {
+ rrange := ranges.Get(i)
+ p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{
+ Start: proto.Int32(int32(rrange[0])),
+ End: proto.Int32(int32(rrange[1])),
+ })
+ }
+ for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
+ p.ReservedName = append(p.ReservedName, string(names.Get(i)))
+ }
+ return p
+}
+
+// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a
+// google.protobuf.EnumValueDescriptorProto message.
+func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
+ return &descriptorpb.EnumValueDescriptorProto{
+ Name: proto.String(string(value.Name())),
+ Number: proto.Int32(int32(value.Number())),
+ Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions),
+ }
+}
+
+// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a
+// google.protobuf.ServiceDescriptorProto message.
+func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
+ p := &descriptorpb.ServiceDescriptorProto{
+ Name: proto.String(string(service.Name())),
+ Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions),
+ }
+ for i, methods := 0, service.Methods(); i < methods.Len(); i++ {
+ p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i)))
+ }
+ return p
+}
+
+// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a
+// google.protobuf.MethodDescriptorProto message.
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
+ p := &descriptorpb.MethodDescriptorProto{
+ Name: proto.String(string(method.Name())),
+ InputType: fullNameOf(method.Input()),
+ OutputType: fullNameOf(method.Output()),
+ Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions),
+ }
+ if method.IsStreamingClient() {
+ p.ClientStreaming = proto.Bool(true)
+ }
+ if method.IsStreamingServer() {
+ p.ServerStreaming = proto.Bool(true)
+ }
+ return p
+}
+
+func fullNameOf(d protoreflect.Descriptor) *string {
+ if d == nil {
+ return nil
+ }
+ if strings.HasPrefix(string(d.FullName()), unknownPrefix) {
+ return proto.String(string(d.FullName()[len(unknownPrefix):]))
+ }
+ return proto.String("." + string(d.FullName()))
+}
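
A minimal round-trip sketch using the conversion entry points added here: convert a known file descriptor to its FileDescriptorProto form and rebuild it with NewFile. duration.proto is convenient because it has no imports, so the resolver is barely exercised:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	fd := durationpb.File_google_protobuf_duration_proto
	// Descriptor -> proto form -> descriptor again.
	fdp := protodesc.ToFileDescriptorProto(fd)
	rebuilt, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(rebuilt.Path(), rebuilt.Messages().Len()) // google/protobuf/duration.proto 1
}
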
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index 1b89bed..dd85915 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -22,8 +22,9 @@
//
// The protobuf descriptor interfaces are not meant to be implemented by
// user code since they might need to be extended in the future to support
-// additions to the protobuf language. Protobuf descriptors can be constructed
-// using the "google.golang.org/protobuf/reflect/protodesc" package.
+// additions to the protobuf language.
+// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// google.protobuf.DescriptorProto messages and protobuf descriptors.
//
//
// Go Type Descriptors
@@ -127,7 +128,6 @@
import (
"fmt"
- "regexp"
"strings"
"google.golang.org/protobuf/encoding/protowire"
@@ -407,19 +407,14 @@
doNotImplement
}
-var (
- regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`)
- regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`)
-)
-
// Name is the short name for a proto declaration. This is not the name
// as used in Go source code, which might not be identical to the proto name.
type Name string // e.g., "Kind"
-// IsValid reports whether n is a syntactically valid name.
+// IsValid reports whether s is a syntactically valid name.
// An empty name is invalid.
-func (n Name) IsValid() bool {
- return regexName.MatchString(string(n))
+func (s Name) IsValid() bool {
+ return consumeIdent(string(s)) == len(s)
}
// Names represent a list of names.
@@ -442,10 +437,42 @@
// This should not have any leading or trailing dots.
type FullName string // e.g., "google.protobuf.Field.Kind"
-// IsValid reports whether n is a syntactically valid full name.
+// IsValid reports whether s is a syntactically valid full name.
// An empty full name is invalid.
-func (n FullName) IsValid() bool {
- return regexFullName.MatchString(string(n))
+func (s FullName) IsValid() bool {
+ i := consumeIdent(string(s))
+ if i < 0 {
+ return false
+ }
+ for len(s) > i {
+ if s[i] != '.' {
+ return false
+ }
+ i++
+ n := consumeIdent(string(s[i:]))
+ if n < 0 {
+ return false
+ }
+ i += n
+ }
+ return true
+}
+
+func consumeIdent(s string) (i int) {
+ if len(s) == 0 || !isLetter(s[i]) {
+ return -1
+ }
+ i++
+ for len(s) > i && isLetterDigit(s[i]) {
+ i++
+ }
+ return i
+}
+func isLetter(c byte) bool {
+ return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+func isLetterDigit(c byte) bool {
+ return isLetter(c) || ('0' <= c && c <= '9')
}
// Name returns the short name, which is the last identifier segment.
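
A quick sketch exercising the FullName helpers alongside the regexp-free IsValid implementation above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	n := protoreflect.FullName("google.protobuf.Field.Kind")
	fmt.Println(n.IsValid()) // true
	fmt.Println(n.Parent())  // google.protobuf.Field
	fmt.Println(n.Name())    // Kind
}
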
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
index 32ea3d9..121ba3a 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
@@ -4,6 +4,10 @@
package protoreflect
+import (
+ "strconv"
+)
+
// SourceLocations is a list of source locations.
type SourceLocations interface {
// Len reports the number of source locations in the proto file.
@@ -11,9 +15,20 @@
// Get returns the ith SourceLocation. It panics if out of bounds.
Get(int) SourceLocation
- doNotImplement
+ // ByPath returns the SourceLocation for the given path,
+ // returning the first location if multiple exist for the same path.
+ // If multiple locations exist for the same path,
+ // then the SourceLocation.Next index can be used to identify
+ // the next SourceLocation for that path.
+ // If no location exists for this path, it returns the zero value.
+ ByPath(path SourcePath) SourceLocation
- // TODO: Add ByPath and ByDescriptor helper methods.
+ // ByDescriptor returns the SourceLocation for the given descriptor,
+ // returning the first location if multiple exist for the same path.
+ // If no location exists for this descriptor, it returns the zero value.
+ ByDescriptor(desc Descriptor) SourceLocation
+
+ doNotImplement
}
// SourceLocation describes a source location and
@@ -39,6 +54,10 @@
LeadingComments string
// TrailingComments is the trailing attached comment for the declaration.
TrailingComments string
+
+ // Next is an index into SourceLocations for the next source location that
+ // has the same Path. It is zero if there is no next location.
+ Next int
}
// SourcePath identifies part of a file descriptor for a source location.
@@ -48,5 +67,62 @@
// See google.protobuf.SourceCodeInfo.Location.path.
type SourcePath []int32
-// TODO: Add SourcePath.String method to pretty-print the path. For example:
-// ".message_type[6].nested_type[15].field[3]"
+// Equal reports whether p1 equals p2.
+func (p1 SourcePath) Equal(p2 SourcePath) bool {
+ if len(p1) != len(p2) {
+ return false
+ }
+ for i := range p1 {
+ if p1[i] != p2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// String formats the path in a human-readable manner.
+// The output is guaranteed to be deterministic,
+// making it suitable for use as a key into a Go map.
+// It is not guaranteed to be stable as the exact output could change
+// in a future version of this module.
+//
+// Example output:
+// .message_type[6].nested_type[15].field[3]
+func (p SourcePath) String() string {
+ b := p.appendFileDescriptorProto(nil)
+ for _, i := range p {
+ b = append(b, '.')
+ b = strconv.AppendInt(b, int64(i), 10)
+ }
+ return string(b)
+}
+
+type appendFunc func(*SourcePath, []byte) []byte
+
+func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ b = append(b, '.')
+ b = append(b, name...)
+ *p = (*p)[1:]
+ if f != nil {
+ b = f(p, b)
+ }
+ return b
+}
+
+func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte {
+ b = p.appendSingularField(b, name, nil)
+ if len(*p) == 0 || (*p)[0] < 0 {
+ return b
+ }
+ b = append(b, '[')
+ b = strconv.AppendUint(b, uint64((*p)[0]), 10)
+ b = append(b, ']')
+ *p = (*p)[1:]
+ if f != nil {
+ b = f(p, b)
+ }
+ return b
+}
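
The new SourcePath.String method can be exercised directly; this sketch reproduces the example path from the comment above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// FileDescriptorProto field 4 is message_type, DescriptorProto field 3 is
	// nested_type, and field 2 is field; in this path the odd positions are
	// repeated-field indexes.
	p := protoreflect.SourcePath{4, 6, 3, 15, 2, 3}
	fmt.Println(p.String()) // .message_type[6].nested_type[15].field[3]
}
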
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
new file mode 100644
index 0000000..b03c122
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -0,0 +1,461 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package protoreflect
+
+func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "package", nil)
+ case 3:
+ b = p.appendRepeatedField(b, "dependency", nil)
+ case 10:
+ b = p.appendRepeatedField(b, "public_dependency", nil)
+ case 11:
+ b = p.appendRepeatedField(b, "weak_dependency", nil)
+ case 4:
+ b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
+ case 5:
+ b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto)
+ case 6:
+ b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto)
+ case 7:
+ b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto)
+ case 8:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions)
+ case 9:
+ b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
+ case 12:
+ b = p.appendSingularField(b, "syntax", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto)
+ case 6:
+ b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto)
+ case 3:
+ b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto)
+ case 4:
+ b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto)
+ case 5:
+ b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange)
+ case 8:
+ b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto)
+ case 7:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions)
+ case 9:
+ b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
+ case 10:
+ b = p.appendRepeatedField(b, "reserved_name", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions)
+ case 4:
+ b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
+ case 5:
+ b = p.appendRepeatedField(b, "reserved_name", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 3:
+ b = p.appendSingularField(b, "number", nil)
+ case 4:
+ b = p.appendSingularField(b, "label", nil)
+ case 5:
+ b = p.appendSingularField(b, "type", nil)
+ case 6:
+ b = p.appendSingularField(b, "type_name", nil)
+ case 2:
+ b = p.appendSingularField(b, "extendee", nil)
+ case 7:
+ b = p.appendSingularField(b, "default_value", nil)
+ case 9:
+ b = p.appendSingularField(b, "oneof_index", nil)
+ case 10:
+ b = p.appendSingularField(b, "json_name", nil)
+ case 8:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions)
+ case 17:
+ b = p.appendSingularField(b, "proto3_optional", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendFileOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "java_package", nil)
+ case 8:
+ b = p.appendSingularField(b, "java_outer_classname", nil)
+ case 10:
+ b = p.appendSingularField(b, "java_multiple_files", nil)
+ case 20:
+ b = p.appendSingularField(b, "java_generate_equals_and_hash", nil)
+ case 27:
+ b = p.appendSingularField(b, "java_string_check_utf8", nil)
+ case 9:
+ b = p.appendSingularField(b, "optimize_for", nil)
+ case 11:
+ b = p.appendSingularField(b, "go_package", nil)
+ case 16:
+ b = p.appendSingularField(b, "cc_generic_services", nil)
+ case 17:
+ b = p.appendSingularField(b, "java_generic_services", nil)
+ case 18:
+ b = p.appendSingularField(b, "py_generic_services", nil)
+ case 42:
+ b = p.appendSingularField(b, "php_generic_services", nil)
+ case 23:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 31:
+ b = p.appendSingularField(b, "cc_enable_arenas", nil)
+ case 36:
+ b = p.appendSingularField(b, "objc_class_prefix", nil)
+ case 37:
+ b = p.appendSingularField(b, "csharp_namespace", nil)
+ case 39:
+ b = p.appendSingularField(b, "swift_prefix", nil)
+ case 40:
+ b = p.appendSingularField(b, "php_class_prefix", nil)
+ case 41:
+ b = p.appendSingularField(b, "php_namespace", nil)
+ case 44:
+ b = p.appendSingularField(b, "php_metadata_namespace", nil)
+ case 45:
+ b = p.appendSingularField(b, "ruby_package", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location)
+ }
+ return b
+}
+
+func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "start", nil)
+ case 2:
+ b = p.appendSingularField(b, "end", nil)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendMessageOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "message_set_wire_format", nil)
+ case 2:
+ b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 7:
+ b = p.appendSingularField(b, "map_entry", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "start", nil)
+ case 2:
+ b = p.appendSingularField(b, "end", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "number", nil)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 2:
+ b = p.appendSingularField(b, "allow_alias", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "start", nil)
+ case 2:
+ b = p.appendSingularField(b, "end", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "input_type", nil)
+ case 3:
+ b = p.appendSingularField(b, "output_type", nil)
+ case 4:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions)
+ case 5:
+ b = p.appendSingularField(b, "client_streaming", nil)
+ case 6:
+ b = p.appendSingularField(b, "server_streaming", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendServiceOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 33:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendFieldOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "ctype", nil)
+ case 2:
+ b = p.appendSingularField(b, "packed", nil)
+ case 6:
+ b = p.appendSingularField(b, "jstype", nil)
+ case 5:
+ b = p.appendSingularField(b, "lazy", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 10:
+ b = p.appendSingularField(b, "weak", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendUninterpretedOption(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 2:
+ b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart)
+ case 3:
+ b = p.appendSingularField(b, "identifier_value", nil)
+ case 4:
+ b = p.appendSingularField(b, "positive_int_value", nil)
+ case 5:
+ b = p.appendSingularField(b, "negative_int_value", nil)
+ case 6:
+ b = p.appendSingularField(b, "double_value", nil)
+ case 7:
+ b = p.appendSingularField(b, "string_value", nil)
+ case 8:
+ b = p.appendSingularField(b, "aggregate_value", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendRepeatedField(b, "path", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "span", nil)
+ case 3:
+ b = p.appendSingularField(b, "leading_comments", nil)
+ case 4:
+ b = p.appendSingularField(b, "trailing_comments", nil)
+ case 6:
+ b = p.appendRepeatedField(b, "leading_detached_comments", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendOneofOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendMethodOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 33:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 34:
+ b = p.appendSingularField(b, "idempotency_level", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name_part", nil)
+ case 2:
+ b = p.appendSingularField(b, "is_extension", nil)
+ }
+ return b
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 41d560e..8e53c44 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -232,11 +232,15 @@
type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+// It is recommended that implementations of this interface also implement the
+// MessageFieldTypes interface.
type MessageType interface {
// New returns a newly allocated empty message.
+ // It may return nil for synthetic messages representing a map entry.
New() Message
// Zero returns an empty, read-only message.
+ // It may return nil for synthetic messages representing a map entry.
Zero() Message
// Descriptor returns the message descriptor.
@@ -245,6 +249,26 @@
Descriptor() MessageDescriptor
}
+// MessageFieldTypes extends a MessageType by providing type information
+// regarding enums and messages referenced by the message fields.
+type MessageFieldTypes interface {
+ MessageType
+
+ // Enum returns the EnumType for the ith field in Descriptor.Fields.
+ // It returns nil if the ith field is not an enum kind.
+ // It panics if out of bounds.
+ //
+ // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum()
+ Enum(i int) EnumType
+
+ // Message returns the MessageType for the ith field in Descriptor.Fields.
+ // It returns nil if the ith field is not a message or group kind.
+ // It panics if out of bounds.
+ //
+ // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message()
+ Message(i int) MessageType
+}
+
// MessageDescriptors is a list of message declarations.
type MessageDescriptors interface {
// Len reports the number of messages.
@@ -279,13 +303,28 @@
// JSONName reports the name used for JSON serialization.
// It is usually the camel-cased form of the field name.
+ // Extension fields are represented by the full name surrounded by brackets.
JSONName() string
+ // TextName reports the name used for text serialization.
+ // It is usually the name of the field, except that groups use the name
+ // of the inlined message, and extension fields are represented by the
+ // full name surrounded by brackets.
+ TextName() string
+
+ // HasPresence reports whether the field distinguishes between unpopulated
+ // and default values.
+ HasPresence() bool
+
// IsExtension reports whether this is an extension field. If false,
// then Parent and ContainingMessage refer to the same message.
// Otherwise, ContainingMessage and Parent likely differ.
IsExtension() bool
+ // HasOptionalKeyword reports whether the "optional" keyword was explicitly
+ // specified in the source .proto file.
+ HasOptionalKeyword() bool
+
// IsWeak reports whether this is a weak field, which does not impose a
// direct dependency on the target type.
// If true, then Message returns a placeholder type.
@@ -363,6 +402,9 @@
// ByJSONName returns the FieldDescriptor for a field with s as the JSON name.
// It returns nil if not found.
ByJSONName(s string) FieldDescriptor
+ // ByTextName returns the FieldDescriptor for a field with s as the text name.
+ // It returns nil if not found.
+ ByTextName(s string) FieldDescriptor
// ByNumber returns the FieldDescriptor for a field numbered n.
// It returns nil if not found.
ByNumber(n FieldNumber) FieldDescriptor
@@ -375,6 +417,11 @@
type OneofDescriptor interface {
Descriptor
+ // IsSynthetic reports whether this is a synthetic oneof created to support
+ // proto3 optional semantics. If true, Fields contains exactly one field
+ // with HasOptionalKeyword specified.
+ IsSynthetic() bool
+
// Fields is a list of fields belonging to this oneof.
Fields() FieldDescriptors
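A hedged usage sketch of the descriptor accessors added here (ByTextName, HasPresence, HasOptionalKeyword, and the synthetic-oneof check); examplepb and its Example message are hypothetical stand-ins for any generated package.

package main

import (
	"fmt"

	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	md := (&examplepb.Example{}).ProtoReflect().Descriptor()

	if fd := md.Fields().ByTextName("tracked_value"); fd != nil {
		fmt.Println(fd.HasPresence())        // field distinguishes unset from default
		fmt.Println(fd.HasOptionalKeyword()) // declared with the proto3 "optional" keyword
		if od := fd.ContainingOneof(); od != nil && od.IsSynthetic() {
			fmt.Println(fd.TextName(), "is a proto3 optional field")
		}
	}
}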
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a352ed9..f319810 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -114,8 +114,8 @@
// Mutable is a mutating operation and unsafe for concurrent use.
Mutable(FieldDescriptor) Value
- // NewField returns a new value for assignable to the field of a given descriptor.
- // For scalars, this returns the default value.
+ // NewField returns a new value that is assignable to the field
+ // for the given descriptor. For scalars, this returns the default value.
// For lists, maps, and messages, this returns a new, empty, mutable value.
NewField(FieldDescriptor) Value
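A short sketch of the clarified NewField contract: for a repeated field it returns a new, empty, mutable List that can be populated and then assigned back. The examplepb package and its "values" field are hypothetical.

package main

import (
	"google.golang.org/protobuf/reflect/protoreflect"

	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	m := (&examplepb.Example{}).ProtoReflect()
	fd := m.Descriptor().Fields().ByName("values") // hypothetical repeated int64 field

	lv := m.NewField(fd) // new, empty, mutable value for the field
	lv.List().Append(protoreflect.ValueOfInt64(7))
	m.Set(fd, lv)
}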
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index d695a4d..5a34147 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -7,7 +7,6 @@
import (
"fmt"
"math"
- "reflect"
)
// Value is a union where only one Go type may be set at a time.
@@ -86,8 +85,10 @@
return ValueOfEnum(v)
case Message, List, Map:
return valueOfIface(v)
+ case ProtoMessage:
+ panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v))
default:
- panic(fmt.Sprintf("invalid type: %v", reflect.TypeOf(v)))
+ panic(fmt.Sprintf("invalid type: %T", v))
}
}
@@ -197,13 +198,55 @@
}
}
+func (v Value) typeName() string {
+ switch v.typ {
+ case nilType:
+ return "nil"
+ case boolType:
+ return "bool"
+ case int32Type:
+ return "int32"
+ case int64Type:
+ return "int64"
+ case uint32Type:
+ return "uint32"
+ case uint64Type:
+ return "uint64"
+ case float32Type:
+ return "float32"
+ case float64Type:
+ return "float64"
+ case stringType:
+ return "string"
+ case bytesType:
+ return "bytes"
+ case enumType:
+ return "enum"
+ default:
+ switch v := v.getIface().(type) {
+ case Message:
+ return "message"
+ case List:
+ return "list"
+ case Map:
+ return "map"
+ default:
+ return fmt.Sprintf("<unknown: %T>", v)
+ }
+ }
+}
+
+func (v Value) panicMessage(what string) string {
+ return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
// Bool returns v as a bool and panics if the type is not a bool.
func (v Value) Bool() bool {
switch v.typ {
case boolType:
return v.num > 0
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("bool"))
}
}
@@ -213,7 +256,7 @@
case int32Type, int64Type:
return int64(v.num)
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("int"))
}
}
@@ -223,7 +266,7 @@
case uint32Type, uint64Type:
return uint64(v.num)
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("uint"))
}
}
@@ -233,7 +276,7 @@
case float32Type, float64Type:
return math.Float64frombits(uint64(v.num))
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("float"))
}
}
@@ -254,7 +297,7 @@
case bytesType:
return v.getBytes()
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("bytes"))
}
}
@@ -264,37 +307,37 @@
case enumType:
return EnumNumber(v.num)
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("enum"))
}
}
// Message returns v as a Message and panics if the type is not a Message.
func (v Value) Message() Message {
- switch v := v.getIface().(type) {
+ switch vi := v.getIface().(type) {
case Message:
- return v
+ return vi
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("message"))
}
}
// List returns v as a List and panics if the type is not a List.
func (v Value) List() List {
- switch v := v.getIface().(type) {
+ switch vi := v.getIface().(type) {
case List:
- return v
+ return vi
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("list"))
}
}
// Map returns v as a Map and panics if the type is not a Map.
func (v Value) Map() Map {
- switch v := v.getIface().(type) {
+ switch vi := v.getIface().(type) {
case Map:
- return v
+ return vi
default:
- panic("proto: value type mismatch")
+ panic(v.panicMessage("map"))
}
}
@@ -303,8 +346,9 @@
switch v.typ {
case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
return MapKey(v)
+ default:
+ panic(v.panicMessage("map key"))
}
- panic("proto: invalid map key type")
}
// MapKey is used to index maps, where the Go type of the MapKey must match
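A small sketch of the more descriptive panics introduced above: accessing a Value through the wrong kind now reports both the stored and the requested type instead of the generic "proto: value type mismatch".

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	v := protoreflect.ValueOfInt64(42)
	fmt.Println(v.Int()) // 42

	defer func() {
		fmt.Println(recover()) // type mismatch: cannot convert int64 to bool
	}()
	_ = v.Bool() // panics: v holds an int64, not a bool
}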
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 43f16c6..59f024c 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -17,24 +17,49 @@
import (
"fmt"
- "log"
+ "os"
"strings"
"sync"
+ "google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/reflect/protoreflect"
)
+// conflictPolicy configures the policy for handling registration conflicts.
+//
+// It can be overridden at compile time with a linker-initialized variable:
+// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn"
+//
+// It can be overridden at program execution with an environment variable:
+// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main
+//
+// Neither of the above are covered by the compatibility promise and
+// may be removed in a future release of this module.
+var conflictPolicy = "panic" // "panic" | "warn" | "ignore"
+
// ignoreConflict reports whether to ignore a registration conflict
// given the descriptor being registered and the error.
// It is a variable so that the behavior is easily overridden in another file.
var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
- log.Printf(""+
- "WARNING: %v\n"+
- "A future release will panic on registration conflicts. See:\n"+
- "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+
- "\n", err)
- return true
+ const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT"
+ const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict"
+ policy := conflictPolicy
+ if v := os.Getenv(env); v != "" {
+ policy = v
+ }
+ switch policy {
+ case "panic":
+ panic(fmt.Sprintf("%v\nSee %v\n", err, faq))
+ case "warn":
+ fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq)
+ return true
+ case "ignore":
+ return true
+ default:
+ panic("invalid " + env + " value: " + os.Getenv(env))
+ }
}
var globalMutex sync.RWMutex
@@ -69,7 +94,8 @@
// Note that enum values are in the top-level since they are in the same
// scope as the parent enum.
descsByName map[protoreflect.FullName]interface{}
- filesByPath map[string]protoreflect.FileDescriptor
+ filesByPath map[string][]protoreflect.FileDescriptor
+ numFiles int
}
type packageDescriptor struct {
@@ -92,16 +118,16 @@
r.descsByName = map[protoreflect.FullName]interface{}{
"": &packageDescriptor{},
}
- r.filesByPath = make(map[string]protoreflect.FileDescriptor)
+ r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
}
path := file.Path()
- if prev := r.filesByPath[path]; prev != nil {
+ if prev := r.filesByPath[path]; len(prev) > 0 {
+ r.checkGenProtoConflict(path)
err := errors.New("file %q is already registered", file.Path())
- err = amendErrorWithCaller(err, prev, file)
- if r == GlobalFiles && ignoreConflict(file, err) {
- err = nil
+ err = amendErrorWithCaller(err, prev[0], file)
+ if !(r == GlobalFiles && ignoreConflict(file, err)) {
+ return err
}
- return err
}
for name := file.Package(); name != ""; name = name.Parent() {
@@ -142,10 +168,52 @@
rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
r.descsByName[d.FullName()] = d
})
- r.filesByPath[path] = file
+ r.filesByPath[path] = append(r.filesByPath[path], file)
+ r.numFiles++
return nil
}
+// Several well-known types were hosted in the google.golang.org/genproto module
+// but were later moved to this module. To avoid a weak dependency on the
+// genproto module (and its relatively large set of transitive dependencies),
+// we rely on a registration conflict to determine whether the genproto version
+// is too old (i.e., does not contain aliases to the new type declarations).
+func (r *Files) checkGenProtoConflict(path string) {
+ if r != GlobalFiles {
+ return
+ }
+ var prevPath string
+ const prevModule = "google.golang.org/genproto"
+ const prevVersion = "cb27e3aa (May 26th, 2020)"
+ switch path {
+ case "google/protobuf/field_mask.proto":
+ prevPath = prevModule + "/protobuf/field_mask"
+ case "google/protobuf/api.proto":
+ prevPath = prevModule + "/protobuf/api"
+ case "google/protobuf/type.proto":
+ prevPath = prevModule + "/protobuf/ptype"
+ case "google/protobuf/source_context.proto":
+ prevPath = prevModule + "/protobuf/source_context"
+ default:
+ return
+ }
+ pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto")
+ pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb"
+ currPath := "google.golang.org/protobuf/types/known/" + pkgName
+ panic(fmt.Sprintf(""+
+ "duplicate registration of %q\n"+
+ "\n"+
+ "The generated definition for this file has moved:\n"+
+ "\tfrom: %q\n"+
+ "\tto: %q\n"+
+ "A dependency on the %q module must\n"+
+ "be at version %v or higher.\n"+
+ "\n"+
+ "Upgrade the dependency by running:\n"+
+ "\tgo get -u %v\n",
+ path, prevPath, currPath, prevModule, prevVersion, prevPath))
+}
+
// FindDescriptorByName looks up a descriptor by the full name.
//
// This returns (nil, NotFound) if not found.
@@ -241,6 +309,7 @@
// FindFileByPath looks up a file by the path.
//
// This returns (nil, NotFound) if not found.
+// This returns an error if multiple files have the same path.
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
if r == nil {
return nil, NotFound
@@ -249,13 +318,19 @@
globalMutex.RLock()
defer globalMutex.RUnlock()
}
- if fd, ok := r.filesByPath[path]; ok {
- return fd, nil
+ fds := r.filesByPath[path]
+ switch len(fds) {
+ case 0:
+ return nil, NotFound
+ case 1:
+ return fds[0], nil
+ default:
+ return nil, errors.New("multiple files named %q", path)
}
- return nil, NotFound
}
-// NumFiles reports the number of registered files.
+// NumFiles reports the number of registered files,
+// including duplicate files with the same name.
func (r *Files) NumFiles() int {
if r == nil {
return 0
@@ -264,10 +339,11 @@
globalMutex.RLock()
defer globalMutex.RUnlock()
}
- return len(r.filesByPath)
+ return r.numFiles
}
// RangeFiles iterates over all registered files while f returns true.
+// If multiple files have the same name, RangeFiles iterates over all of them.
// The iteration order is undefined.
func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) {
if r == nil {
@@ -277,9 +353,11 @@
globalMutex.RLock()
defer globalMutex.RUnlock()
}
- for _, file := range r.filesByPath {
- if !f(file) {
- return
+ for _, files := range r.filesByPath {
+ for _, file := range files {
+ if !f(file) {
+ return
+ }
}
}
}
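A hedged sketch of how the duplicate-path handling surfaces through the Files API: FindFileByPath fails with a "multiple files named ..." error only when one path was registered more than once, while NumFiles and RangeFiles account for every registration. Importing descriptorpb is assumed to register google/protobuf/descriptor.proto as a side effect.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // registers google/protobuf/descriptor.proto
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
	if err != nil {
		log.Fatal(err) // NotFound, or "multiple files named ..." on duplicate registration
	}
	fmt.Println(fd.Package()) // google.protobuf

	fmt.Println(protoregistry.GlobalFiles.NumFiles()) // includes duplicate registrations
	protoregistry.GlobalFiles.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
		fmt.Println(fd.Path())
		return true
	})
}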
@@ -528,13 +606,25 @@
return nil, NotFound
}
-// FindMessageByName looks up a message by its full name.
-// E.g., "google.protobuf.Any"
+// FindMessageByName looks up a message by its full name,
+// e.g. "google.protobuf.Any".
//
-// This return (nil, NotFound) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
- // The full name by itself is a valid URL.
- return r.FindMessageByURL(string(message))
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[message]; v != nil {
+ if mt, _ := v.(protoreflect.MessageType); mt != nil {
+ return mt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want message", typeName(v))
+ }
+ return nil, NotFound
}
// FindMessageByURL looks up a message by a URL identifier.
@@ -542,6 +632,8 @@
//
// This returns (nil, NotFound) if not found.
func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ // This function is similar to FindMessageByName but
+ // truncates anything before and including '/' in the URL.
if r == nil {
return nil, NotFound
}
@@ -581,6 +673,26 @@
if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
return xt, nil
}
+
+ // MessageSet extensions are special in that the name of the extension
+ // is the name of the message type used to extend the MessageSet.
+ // This naming scheme is used by text and JSON serialization.
+ //
+ // This feature is protected by the ProtoLegacy flag since MessageSets
+ // are a proto1 feature that is long deprecated.
+ if flags.ProtoLegacy {
+ if _, ok := v.(protoreflect.MessageType); ok {
+ field := field.Append(messageset.ExtensionName)
+ if v := r.typesByName[field]; v != nil {
+ if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
+ if messageset.IsMessageSetExtension(xt.TypeDescriptor()) {
+ return xt, nil
+ }
+ }
+ }
+ }
+ }
+
return nil, errors.New("found wrong type: got %v, want extension", typeName(v))
}
return nil, NotFound
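A hedged sketch contrasting the name-based and URL-based message lookups documented above; google.protobuf.Any is used because importing anypb registers it with the global types registry.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/anypb" // registers google.protobuf.Any
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any")
	if err != nil {
		log.Fatal(err)
	}

	// URL lookup ignores everything up to and including the last '/'.
	mt2, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Any")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mt.Descriptor().FullName(), mt.Descriptor() == mt2.Descriptor()) // google.protobuf.Any true
}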
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 7116dcf..abe4ab5 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -43,7 +43,6 @@
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoiface "google.golang.org/protobuf/runtime/protoiface"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
@@ -829,15 +828,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3}
}
-var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_ExtensionRangeOptions
-}
-
func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -881,6 +871,28 @@
// it to camelCase.
JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // If true, this is a proto3 "optional". When a proto3 field is optional, it
+ // tracks presence regardless of field type.
+ //
+ // When proto3_optional is true, this field must belong to a oneof to
+ // signal to old proto3 clients that presence is tracked for this field. This
+ // oneof is known as a "synthetic" oneof, and this field must be its sole
+ // member (each proto3 optional field gets its own synthetic oneof). Synthetic
+ // oneofs exist in the descriptor only, and do not generate any API. Synthetic
+ // oneofs must be ordered after all "real" oneofs.
+ //
+ // For message fields, proto3_optional doesn't create any semantic change,
+ // since non-repeated message fields always track presence. However it still
+ // indicates the semantic detail of whether the user wrote "optional" or not.
+ // This can be useful for round-tripping the .proto file. For consistency we
+ // give message fields a synthetic oneof also, even though it is not required
+ // to track presence. This is especially important because the parser can't
+ // tell if a field is a message or an enum, so it must always create a
+ // synthetic oneof.
+ //
+ // Proto2 optional fields do not set this flag, because they already indicate
+ // optional with `LABEL_OPTIONAL`.
+ Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
}
func (x *FieldDescriptorProto) Reset() {
@@ -985,6 +997,13 @@
return nil
}
+func (x *FieldDescriptorProto) GetProto3Optional() bool {
+ if x != nil && x.Proto3Optional != nil {
+ return *x.Proto3Optional
+ }
+ return false
+}
+
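A minimal sketch of the new proto3_optional field on FieldDescriptorProto, built by hand with the standard proto pointer helpers:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FieldDescriptorProto{
		Name:           proto.String("tracked_value"),
		Number:         proto.Int32(1),
		Label:          descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Type:           descriptorpb.FieldDescriptorProto_TYPE_INT32.Enum(),
		Proto3Optional: proto.Bool(true), // the field was declared with the proto3 "optional" keyword
	}
	fmt.Println(fd.GetProto3Optional()) // true
}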
// Describes a oneof.
type OneofDescriptorProto struct {
state protoimpl.MessageState
@@ -1415,7 +1434,7 @@
Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
- CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"`
// Sets the objective c class prefix which is prepended to all objective c
// generated classes from this .proto. There is no default.
ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
@@ -1456,7 +1475,7 @@
Default_FileOptions_PyGenericServices = bool(false)
Default_FileOptions_PhpGenericServices = bool(false)
Default_FileOptions_Deprecated = bool(false)
- Default_FileOptions_CcEnableArenas = bool(false)
+ Default_FileOptions_CcEnableArenas = bool(true)
)
func (x *FileOptions) Reset() {
@@ -1491,15 +1510,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10}
}
-var extRange_FileOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_FileOptions
-}
-
func (x *FileOptions) GetJavaPackage() string {
if x != nil && x.JavaPackage != nil {
return *x.JavaPackage
@@ -1747,15 +1757,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11}
}
-var extRange_MessageOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_MessageOptions
-}
-
func (x *MessageOptions) GetMessageSetWireFormat() bool {
if x != nil && x.MessageSetWireFormat != nil {
return *x.MessageSetWireFormat
@@ -1901,15 +1902,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12}
}
-var extRange_FieldOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_FieldOptions
-}
-
func (x *FieldOptions) GetCtype() FieldOptions_CType {
if x != nil && x.Ctype != nil {
return *x.Ctype
@@ -2001,15 +1993,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13}
}
-var extRange_OneofOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_OneofOptions
-}
-
func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2072,15 +2055,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14}
}
-var extRange_EnumOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_EnumOptions
-}
-
func (x *EnumOptions) GetAllowAlias() bool {
if x != nil && x.AllowAlias != nil {
return *x.AllowAlias
@@ -2154,15 +2128,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15}
}
-var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_EnumValueOptions
-}
-
func (x *EnumValueOptions) GetDeprecated() bool {
if x != nil && x.Deprecated != nil {
return *x.Deprecated
@@ -2229,15 +2194,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16}
}
-var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_ServiceOptions
-}
-
func (x *ServiceOptions) GetDeprecated() bool {
if x != nil && x.Deprecated != nil {
return *x.Deprecated
@@ -2306,15 +2262,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17}
}
-var extRange_MethodOptions = []protoiface.ExtensionRangeV1{
- {Start: 1000, End: 536870911},
-}
-
-// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
-func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
- return extRange_MethodOptions
-}
-
func (x *MethodOptions) GetDeprecated() bool {
if x != nil && x.Deprecated != nil {
return *x.Deprecated
@@ -3167,7 +3114,7 @@
0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0x98, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+ 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
@@ -3193,348 +3140,350 @@
0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a,
- 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12,
- 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f,
- 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12,
- 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12,
- 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10,
- 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
- 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32,
- 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10,
- 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44,
- 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49,
- 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62,
- 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49,
- 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
- 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41,
- 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63,
- 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
- 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c,
- 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11,
+ 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c,
+ 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41,
+ 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36,
+ 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54,
+ 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54,
+ 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58,
+ 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
+ 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a,
+ 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10,
+ 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
+ 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
+ 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45,
+ 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50,
+ 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a,
+ 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
+ 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e,
- 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75,
- 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62,
- 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
- 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e,
- 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65,
- 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+ 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
+ 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+ 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70,
- 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74,
- 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74,
- 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x92, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61,
- 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76,
- 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61,
- 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65,
- 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61,
- 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65,
- 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11,
- 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65,
- 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61,
- 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61,
- 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73,
- 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f,
- 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66,
- 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13,
- 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55,
- 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f,
- 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65,
- 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74,
- 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70,
- 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f,
- 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39,
- 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
- 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
+ 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91,
+ 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
+ 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f,
+ 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c,
+ 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61,
+ 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61,
+ 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68,
+ 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c,
+ 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53,
+ 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f,
+ 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+ 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f,
0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70,
- 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
- 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
- 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x12, 0x2f, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72,
- 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
- 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61,
- 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f,
- 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62,
- 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a,
- 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66,
- 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70,
- 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
- 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68,
- 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68,
- 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
- 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b,
- 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a,
- 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a,
- 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45,
- 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f,
- 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
- 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a,
- 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72,
- 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
- 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65,
- 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e,
- 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53,
- 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a,
- 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a,
+ 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12,
+ 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20,
+ 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a,
+ 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
+ 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65,
+ 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70,
+ 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a,
+ 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
+ 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12,
+ 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10,
+ 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26,
+ 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61,
+ 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
+ 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
+ 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09,
+ 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52,
+ 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61,
+ 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b,
+ 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52,
+ 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c,
+ 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a,
+ 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53,
+ 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10,
+ 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43,
+ 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a,
+ 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09,
+ 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a,
+ 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+ 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02,
+ 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08,
+ 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+ 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
+ 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
+ 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
+ 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+ 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
+ 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
+ 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
+ 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
+ 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
+ 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
+ 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65,
+ 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a,
+ 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44,
+ 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49,
+ 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+ 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80,
- 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2,
- 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54,
- 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79,
- 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
- 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74,
- 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25,
- 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12,
- 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52,
- 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a,
- 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d,
- 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e,
- 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
- 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08,
- 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75,
- 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f,
- 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61,
- 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
- 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10,
- 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a,
- 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72,
- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
- 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d,
- 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
- 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c,
- 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e,
- 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79,
- 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54,
- 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13,
- 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54,
- 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
- 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a,
- 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50,
- 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
- 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69,
- 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10,
- 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
- 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
- 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a,
- 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e,
- 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08,
- 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65,
- 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b,
- 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44,
- 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42,
- 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61,
- 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61,
- 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d,
- 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61,
- 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11,
- 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e,
- 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61,
- 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f,
- 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65,
- 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d,
- 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61,
- 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49,
- 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
- 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f,
- 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
- 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12,
+ 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62,
+ 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27,
+ 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
+ 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50,
+ 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74,
+ 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
+ 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
+ 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
+ 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
+ 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
+ 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
+ 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
+ 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
+ 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01,
+ 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02,
+ 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67,
+ 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e,
+ 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+ 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e,
}
var (
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 5f9498e..8c10797 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -31,12 +31,100 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
+// Package anypb contains generated types for google/protobuf/any.proto.
+//
+// The Any message is a dynamic representation of any other message value.
+// It is functionally a tuple of the full name of the remote message type and
+// the serialized bytes of the remote message value.
+//
+//
+// Constructing an Any
+//
+// An Any message containing another message value is constructed using New:
+//
+// any, err := anypb.New(m)
+// if err != nil {
+// ... // handle error
+// }
+// ... // make use of any
+//
+//
+// Unmarshaling an Any
+//
+// With a populated Any message, the underlying message can be serialized into
+// a remote concrete message value in a few ways.
+//
+// If the exact concrete type is known, then a new (or pre-existing) instance
+// of that message can be passed to the UnmarshalTo method:
+//
+// m := new(foopb.MyMessage)
+// if err := any.UnmarshalTo(m); err != nil {
+// ... // handle error
+// }
+// ... // make use of m
+//
+// If the exact concrete type is not known, then the UnmarshalNew method can be
+// used to unmarshal the contents into a new instance of the remote message type:
+//
+// m, err := any.UnmarshalNew()
+// if err != nil {
+// ... // handle error
+// }
+// ... // make use of m
+//
+// UnmarshalNew uses the global type registry to resolve the message type and
+// construct a new instance of that message to unmarshal into. In order for a
+// message type to appear in the global registry, the Go type representing that
+// protobuf message type must be linked into the Go binary. For messages
+// generated by protoc-gen-go, this is achieved through an import of the
+// generated Go package representing a .proto file.
+//
+// A common pattern with UnmarshalNew is to use a type switch with the resulting
+// proto.Message value:
+//
+// switch m := m.(type) {
+// case *foopb.MyMessage:
+// ... // make use of m as a *foopb.MyMessage
+// case *barpb.OtherMessage:
+// ... // make use of m as a *barpb.OtherMessage
+// case *bazpb.SomeMessage:
+// ... // make use of m as a *bazpb.SomeMessage
+// }
+//
+// This pattern ensures that the generated packages containing the message types
+// listed in the case clauses are linked into the Go binary and therefore also
+// registered in the global registry.
+//
+//
+// Type checking an Any
+//
+// To type check whether an Any message represents some other message,
+// use the MessageIs method:
+//
+// if any.MessageIs((*foopb.MyMessage)(nil)) {
+// ... // make use of any, knowing that it contains a foopb.MyMessage
+// }
+//
+// The MessageIs method can also be used with an allocated instance of the target
+// message type if the intention is to unmarshal into it if the type matches:
+//
+// m := new(foopb.MyMessage)
+// if any.MessageIs(m) {
+// if err := any.UnmarshalTo(m); err != nil {
+// ... // handle error
+// }
+// ... // make use of m
+// }
+//
package anypb
import (
+ proto "google.golang.org/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoregistry "google.golang.org/protobuf/reflect/protoregistry"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
+ strings "strings"
sync "sync"
)
@@ -78,10 +166,13 @@
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
+// any, err := anypb.New(foo)
+// if err != nil {
+// ...
+// }
// ...
// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// if err := any.UnmarshalTo(foo); err != nil {
// ...
// }
//
@@ -158,6 +249,125 @@
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
+// New marshals src into a new Any instance.
+func New(src proto.Message) (*Any, error) {
+ dst := new(Any)
+ if err := dst.MarshalFrom(src); err != nil {
+ return nil, err
+ }
+ return dst, nil
+}
+
+// MarshalFrom marshals src into dst as the underlying message
+// using the provided marshal options.
+//
+// If no options are specified, call dst.MarshalFrom instead.
+func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error {
+ const urlPrefix = "type.googleapis.com/"
+ if src == nil {
+ return protoimpl.X.NewError("invalid nil source message")
+ }
+ b, err := opts.Marshal(src)
+ if err != nil {
+ return err
+ }
+ dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())
+ dst.Value = b
+ return nil
+}
+
+// UnmarshalTo unmarshals the underlying message from src into dst
+// using the provided unmarshal options.
+// It reports an error if dst is not of the right message type.
+//
+// If no options are specified, call src.UnmarshalTo instead.
+func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error {
+ if src == nil {
+ return protoimpl.X.NewError("invalid nil source message")
+ }
+ if !src.MessageIs(dst) {
+ got := dst.ProtoReflect().Descriptor().FullName()
+ want := src.MessageName()
+ return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want)
+ }
+ return opts.Unmarshal(src.GetValue(), dst)
+}
+
+// UnmarshalNew unmarshals the underlying message from src into dst,
+// which is a newly created message using a type resolved from the type URL.
+// The message type is resolved according to opts.Resolver,
+// which should implement protoregistry.MessageTypeResolver.
+// It reports an error if the underlying message type could not be resolved.
+//
+// If no options are specified, call src.UnmarshalNew instead.
+func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) {
+ if src.GetTypeUrl() == "" {
+ return nil, protoimpl.X.NewError("invalid empty type URL")
+ }
+ if opts.Resolver == nil {
+ opts.Resolver = protoregistry.GlobalTypes
+ }
+ r, ok := opts.Resolver.(protoregistry.MessageTypeResolver)
+ if !ok {
+ return nil, protoregistry.NotFound
+ }
+ mt, err := r.FindMessageByURL(src.GetTypeUrl())
+ if err != nil {
+ if err == protoregistry.NotFound {
+ return nil, err
+ }
+ return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err)
+ }
+ dst = mt.New().Interface()
+ return dst, opts.Unmarshal(src.GetValue(), dst)
+}
+
+// MessageIs reports whether the underlying message is of the same type as m.
+func (x *Any) MessageIs(m proto.Message) bool {
+ if m == nil {
+ return false
+ }
+ url := x.GetTypeUrl()
+ name := string(m.ProtoReflect().Descriptor().FullName())
+ if !strings.HasSuffix(url, name) {
+ return false
+ }
+ return len(url) == len(name) || url[len(url)-len(name)-1] == '/'
+}
+
+// MessageName reports the full name of the underlying message,
+// returning an empty string if invalid.
+func (x *Any) MessageName() protoreflect.FullName {
+ url := x.GetTypeUrl()
+ name := protoreflect.FullName(url)
+ if i := strings.LastIndexByte(url, '/'); i >= 0 {
+ name = name[i+len("/"):]
+ }
+ if !name.IsValid() {
+ return ""
+ }
+ return name
+}
+
+// MarshalFrom marshals m into x as the underlying message.
+func (x *Any) MarshalFrom(m proto.Message) error {
+ return MarshalFrom(x, m, proto.MarshalOptions{})
+}
+
+// UnmarshalTo unmarshals the contents of the underlying message of x into m.
+// It resets m before performing the unmarshal operation.
+// It reports an error if m is not of the right message type.
+func (x *Any) UnmarshalTo(m proto.Message) error {
+ return UnmarshalTo(x, m, proto.UnmarshalOptions{})
+}
+
+// UnmarshalNew unmarshals the contents of the underlying message of x into
+// a newly allocated message of the specified type.
+// It reports an error if the underlying message type could not be resolved.
+func (x *Any) UnmarshalNew() (proto.Message, error) {
+ return UnmarshalNew(x, proto.UnmarshalOptions{})
+}
+
func (x *Any) Reset() {
*x = Any{}
if protoimpl.UnsafeEnabled {
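
The hunk above adds helpers directly on the anypb package (New, MarshalFrom, UnmarshalTo, UnmarshalNew, MessageIs, MessageName), matching the updated example comments that drop the ptypes.MarshalAny/UnmarshalAny calls. A minimal sketch of how calling code can use them after this vendor bump (illustrative only, not part of the vendored file; the Duration payload is an arbitrary choice):

    package main

    import (
        "fmt"
        "log"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        src := &durationpb.Duration{Seconds: 90}

        // New marshals src and records its type URL ("type.googleapis.com/..." prefix).
        packed, err := anypb.New(src)
        if err != nil {
            log.Fatal(err)
        }

        // MessageIs compares the type-URL suffix against the target's full name,
        // so the concrete type can be checked before unmarshaling.
        dst := new(durationpb.Duration)
        if packed.MessageIs(dst) {
            if err := packed.UnmarshalTo(dst); err != nil {
                log.Fatal(err)
            }
        }
        fmt.Println(packed.MessageName(), dst.GetSeconds()) // google.protobuf.Duration 90
    }
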
@@ -213,14 +423,15 @@
0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02,
- 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e,
- 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
+ 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
+ 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 3997c60..a583ca2 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -31,13 +31,58 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
+// Package durationpb contains generated types for google/protobuf/duration.proto.
+//
+// The Duration message represents a signed span of time.
+//
+//
+// Conversion to a Go Duration
+//
+// The AsDuration method can be used to convert a Duration message to a
+// standard Go time.Duration value:
+//
+// d := dur.AsDuration()
+// ... // make use of d as a time.Duration
+//
+// Converting to a time.Duration is a common operation, as it makes the
+// extensive set of time-based operations provided by the time package available.
+// See https://golang.org/pkg/time for more information.
+//
+// The AsDuration method performs the conversion on a best-effort basis.
+// Durations with denormal values (e.g., nanoseconds outside the range
+// -999999999 to +999999999, inclusive; or seconds and nanoseconds with
+// opposite signs)
+// are normalized during the conversion to a time.Duration. To manually check for
+// invalid Durations per the documented limitations in duration.proto,
+// additionally call the CheckValid method:
+//
+// if err := dur.CheckValid(); err != nil {
+// ... // handle error
+// }
+//
+// Note that the documented limitations in duration.proto do not protect a
+// Duration from overflowing the representable range of a time.Duration in Go.
+// The AsDuration method uses saturation arithmetic such that an overflow clamps
+// the resulting value to the closest representable value (e.g., math.MaxInt64
+// for positive overflow and math.MinInt64 for negative overflow).
+//
+//
+// Conversion from a Go Duration
+//
+// The durationpb.New function can be used to construct a Duration message
+// from a standard Go time.Duration value:
+//
+// dur := durationpb.New(d)
+// ... // make use of dur as a *durationpb.Duration
+//
package durationpb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ math "math"
reflect "reflect"
sync "sync"
+ time "time"
)
// A Duration represents a signed, fixed-length span of time represented
@@ -118,6 +163,91 @@
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
}
+// New constructs a new Duration from the provided time.Duration.
+func New(d time.Duration) *Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}
+}
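+
+// A small worked example (the values are illustrative assumptions): New splits
+// the time.Duration into whole seconds and leftover nanoseconds, with both
+// fields sharing the sign of the input:
+//
+//	dur := New(90*time.Second + 500*time.Millisecond)
+//	// dur.Seconds == 90, dur.Nanos == 500000000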
+
+// AsDuration converts x to a time.Duration,
+// returning the closest duration value in the event of overflow.
+func (x *Duration) AsDuration() time.Duration {
+ secs := x.GetSeconds()
+ nanos := x.GetNanos()
+ d := time.Duration(secs) * time.Second
+ overflow := d/time.Second != time.Duration(secs)
+ d += time.Duration(nanos) * time.Nanosecond
+ overflow = overflow || (secs < 0 && nanos < 0 && d > 0)
+ overflow = overflow || (secs > 0 && nanos > 0 && d < 0)
+ if overflow {
+ switch {
+ case secs < 0:
+ return time.Duration(math.MinInt64)
+ case secs > 0:
+ return time.Duration(math.MaxInt64)
+ }
+ }
+ return d
+}
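+
+// A minimal sketch of the saturation behaviour described above, assuming an
+// out-of-range value: 20,000,000,000 seconds (~634 years) passes CheckValid
+// but exceeds what a time.Duration (~±292 years) can hold, so the result is
+// clamped rather than wrapped:
+//
+//	d := (&Duration{Seconds: 20000000000}).AsDuration()
+//	// d == time.Duration(math.MaxInt64)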
+
+// IsValid reports whether the duration is valid.
+// It is equivalent to CheckValid == nil.
+func (x *Duration) IsValid() bool {
+ return x.check() == 0
+}
+
+// CheckValid returns an error if the duration is invalid.
+// In particular, it checks whether the value is within the range of
+// -10000 years to +10000 years inclusive.
+// An error is reported for a nil Duration.
+func (x *Duration) CheckValid() error {
+ switch x.check() {
+ case invalidNil:
+ return protoimpl.X.NewError("invalid nil Duration")
+ case invalidUnderflow:
+ return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x)
+ case invalidOverflow:
+ return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x)
+ case invalidNanosRange:
+ return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x)
+ case invalidNanosSign:
+ return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x)
+ default:
+ return nil
+ }
+}
+
+const (
+ _ = iota
+ invalidNil
+ invalidUnderflow
+ invalidOverflow
+ invalidNanosRange
+ invalidNanosSign
+)
+
+func (x *Duration) check() uint {
+ const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min
+ secs := x.GetSeconds()
+ nanos := x.GetNanos()
+ switch {
+ case x == nil:
+ return invalidNil
+ case secs < -absDuration:
+ return invalidUnderflow
+ case secs > +absDuration:
+ return invalidOverflow
+ case nanos <= -1e9 || nanos >= +1e9:
+ return invalidNanosRange
+ case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):
+ return invalidNanosSign
+ default:
+ return 0
+ }
+}
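+
+// Illustrative classifications for the checks above (values assumed):
+//
+//	(&Duration{Seconds: 1, Nanos: -1}).IsValid()              // false: opposite signs
+//	(&Duration{Seconds: 315576000001}).IsValid()              // false: beyond +10000 years
+//	(&Duration{Seconds: -3600, Nanos: -500000000}).IsValid()  // true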
+
func (x *Duration) Reset() {
*x = Duration{}
if protoimpl.UnsafeEnabled {
@@ -173,16 +303,16 @@
0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a,
- 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c,
- 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
+ 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
+ 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index 32a583d..e7fcea3 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -93,15 +93,15 @@
0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
- 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
- 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f,
- 0x65, 0x6d, 0x70, 0x74, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
- 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
+ 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
+ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 6fe6d42..c9ae921 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -31,6 +31,48 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
+// Package timestamppb contains generated types for google/protobuf/timestamp.proto.
+//
+// The Timestamp message represents a timestamp,
+// an instant in time since the Unix epoch (January 1st, 1970).
+//
+//
+// Conversion to a Go Time
+//
+// The AsTime method can be used to convert a Timestamp message to a
+// standard Go time.Time value in UTC:
+//
+// t := ts.AsTime()
+// ... // make use of t as a time.Time
+//
+// Converting to a time.Time is a common operation, as it makes the
+// extensive set of time-based operations provided by the time package available.
+// See https://golang.org/pkg/time for more information.
+//
+// The AsTime method performs the conversion on a best-effort basis. Timestamps
+// with denormal values (e.g., nanoseconds outside the range 0 to 999999999, inclusive)
+// are normalized during the conversion to a time.Time. To manually check for
+// invalid Timestamps per the documented limitations in timestamp.proto,
+// additionally call the CheckValid method:
+//
+// if err := ts.CheckValid(); err != nil {
+// ... // handle error
+// }
+//
+//
+// Conversion from a Go Time
+//
+// The timestamppb.New function can be used to construct a Timestamp message
+// from a standard Go time.Time value:
+//
+// ts := timestamppb.New(t)
+// ... // make use of ts as a *timestamppb.Timestamp
+//
+// In order to construct a Timestamp representing the current time, use Now:
+//
+// ts := timestamppb.Now()
+// ... // make use of ts as a *timestamppb.Timestamp
+//
package timestamppb
import (
@@ -38,6 +80,7 @@
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ time "time"
)
// A Timestamp represents a point in time independent of any time zone or local
@@ -91,7 +134,16 @@
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
-// Example 5: Compute Timestamp from current time in Python.
+// Example 5: Compute Timestamp from Java `Instant.now()`.
+//
+// Instant now = Instant.now();
+//
+// Timestamp timestamp =
+// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+// .setNanos(now.getNano()).build();
+//
+//
+// Example 6: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
@@ -140,6 +192,73 @@
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
}
+// Now constructs a new Timestamp from the current time.
+func Now() *Timestamp {
+ return New(time.Now())
+}
+
+// New constructs a new Timestamp from the provided time.Time.
+func New(t time.Time) *Timestamp {
+ return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}
+}
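+
+// A small worked example (the instant is an illustrative assumption):
+//
+//	t := time.Date(2020, time.January, 1, 0, 0, 0, 500, time.UTC)
+//	ts := New(t)
+//	// ts.Seconds == 1577836800 (Unix seconds), ts.Nanos == 500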
+
+// AsTime converts x to a time.Time.
+func (x *Timestamp) AsTime() time.Time {
+ return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()
+}
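+
+// A minimal round-trip sketch (assumed usage): AsTime always returns the
+// instant in UTC, regardless of the location attached to the time.Time that
+// produced the Timestamp; the instant itself is preserved:
+//
+//	local := time.Now()          // carries the local time zone
+//	back := New(local).AsTime()  // same instant, expressed in UTC
+//	_ = back.Equal(local)        // true: Equal compares instants, not zones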
+
+// IsValid reports whether the timestamp is valid.
+// It is equivalent to CheckValid == nil.
+func (x *Timestamp) IsValid() bool {
+ return x.check() == 0
+}
+
+// CheckValid returns an error if the timestamp is invalid.
+// In particular, it checks whether the value represents a date that is
+// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
+// An error is reported for a nil Timestamp.
+func (x *Timestamp) CheckValid() error {
+ switch x.check() {
+ case invalidNil:
+ return protoimpl.X.NewError("invalid nil Timestamp")
+ case invalidUnderflow:
+ return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x)
+ case invalidOverflow:
+ return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x)
+ case invalidNanos:
+ return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x)
+ default:
+ return nil
+ }
+}
+
+const (
+ _ = iota
+ invalidNil
+ invalidUnderflow
+ invalidOverflow
+ invalidNanos
+)
+
+func (x *Timestamp) check() uint {
+ const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive
+ const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive
+ secs := x.GetSeconds()
+ nanos := x.GetNanos()
+ switch {
+ case x == nil:
+ return invalidNil
+ case secs < minTimestamp:
+ return invalidUnderflow
+ case secs > maxTimestamp:
+ return invalidOverflow
+ case nanos < 0 || nanos >= 1e9:
+ return invalidNanos
+ default:
+ return 0
+ }
+}
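+
+// Illustrative boundary cases for the range check above (values assumed):
+//
+//	(&Timestamp{Seconds: -62135596800}).IsValid() // true: 0001-01-01T00:00:00Z
+//	(&Timestamp{Seconds: 253402300800}).IsValid() // false: past 9999-12-31T23:59:59Z
+//	(&Timestamp{Nanos: -1}).IsValid()             // false: nanos must be in [0, 1e9)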
+
func (x *Timestamp) Reset() {
*x = Timestamp{}
if protoimpl.UnsafeEnabled {
@@ -196,15 +315,15 @@
0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
- 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
- 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
+ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (