[SEBA-660]: Add IGMP support in BBSim
Change-Id: I9f5c7d8ad39ac82850b04e2c997996d6c47b32d2
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 84e31a2..f58740b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -31,22 +31,23 @@
"time"
"google.golang.org/grpc/balancer"
- _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ "google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
- _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
- _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
"google.golang.org/grpc/serviceconfig"
"google.golang.org/grpc/status"
+
+ _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
+ _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
)
const (
@@ -187,11 +188,11 @@
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
- sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
- if err != nil {
- return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+ if scpr.Err != nil {
+ return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
}
- cc.dopts.defaultServiceConfig = sc
+ cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
@@ -236,29 +237,28 @@
}
}
if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.Exponential{
- MaxDelay: DefaultBackoffConfig.MaxDelay,
+ cc.dopts.bs = backoff.DefaultExponential
+ }
+
+ // Determine the resolver to use.
+ cc.parsedTarget = parseTarget(cc.target)
+ grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
+ resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
+ if resolverBuilder == nil {
+ // If resolver builder is still nil, the parsed target's scheme is
+ // not registered. Fall back to the default resolver and set Endpoint
+ // to the original target.
+ grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
+ cc.parsedTarget = resolver.Target{
+ Scheme: resolver.GetDefaultScheme(),
+ Endpoint: target,
+ }
+ resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
+ if resolverBuilder == nil {
+ return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
}
}
- if cc.dopts.resolverBuilder == nil {
- // Only try to parse target when resolver builder is not already set.
- cc.parsedTarget = parseTarget(cc.target)
- grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- if cc.dopts.resolverBuilder == nil {
- // If resolver builder is still nil, the parsed target's scheme is
- // not registered. Fallback to default resolver and set Endpoint to
- // the original target.
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
- cc.parsedTarget = resolver.Target{
- Scheme: resolver.GetDefaultScheme(),
- Endpoint: target,
- }
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- }
- } else {
- cc.parsedTarget = resolver.Target{Endpoint: target}
- }
+
creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName
@@ -298,14 +298,14 @@
}
// Build the resolver.
- rWrapper, err := newCCResolverWrapper(cc)
+ rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
if err != nil {
return nil, fmt.Errorf("failed to build resolver: %v", err)
}
-
cc.mu.Lock()
cc.resolverWrapper = rWrapper
cc.mu.Unlock()
+
// A blocking dial blocks until the clientConn is ready.
if cc.dopts.block {
for {
@@ -444,7 +444,32 @@
return csm.notifyChan
}
-// ClientConn represents a client connection to an RPC server.
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs. It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+ // Invoke performs a unary RPC and returns after the response is received
+ // into reply.
+ Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+ // NewStream begins a streaming RPC.
+ NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
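
For illustration (not part of the change): code written against ClientConnInterface instead of *ClientConn accepts any implementation - a real connection, an interceptor wrapper, or a test fake. A minimal sketch, with a placeholder method path and untyped messages:

package example

import (
	"context"

	"google.golang.org/grpc"
)

// invokeUnary performs a unary RPC through any ClientConnInterface
// implementation. "/example.Service/Method" is a hypothetical method path.
func invokeUnary(ctx context.Context, cc grpc.ClientConnInterface, req, resp interface{}) error {
	return cc.Invoke(ctx, "/example.Service/Method", req, resp)
}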
+// ClientConn represents a virtual connection to a conceptual endpoint, to
+// perform RPCs.
+//
+// A ClientConn is free to have zero or more actual connections to the endpoint
+// based on configuration, load, etc. It is also free to determine which actual
+// endpoints to use and may change it every RPC, permitting client-side load
+// balancing.
+//
+// A ClientConn encapsulates a range of functionality including name
+// resolution, TCP connection establishment (with retries and backoff) and TLS
+// handshakes. It also handles errors on established connections by
+// re-resolving the name and reconnecting.
type ClientConn struct {
ctx context.Context
cancel context.CancelFunc
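
A minimal dial sketch matching the doc comment above (the target is a placeholder; Dial returns without any TCP connection in place unless grpc.WithBlock() is given):

package example

import (
	"log"

	"google.golang.org/grpc"
)

func dial() *grpc.ClientConn {
	// The returned ClientConn is the "virtual connection": name resolution,
	// connection establishment, and re-establishment all happen behind it.
	conn, err := grpc.Dial("dns:///example.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("grpc.Dial failed: %v", err)
	}
	return conn
}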
@@ -533,58 +558,104 @@
}
}
-func (cc *ClientConn) updateResolverState(s resolver.State) error {
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+ cfg := parseServiceConfig("{}")
+ if cfg.Err != nil {
+ panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+ }
+ emptyServiceConfig = cfg.Config.(*ServiceConfig)
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+ if cc.sc != nil {
+ cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+ return
+ }
+ if cc.dopts.defaultServiceConfig != nil {
+ cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+ } else {
+ cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+ }
+}
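
The default config consulted here comes from a dial option; a hedged sketch (target and policy are placeholders):

package example

import "google.golang.org/grpc"

// dialWithDefaultSC supplies a default service config. Per the precedence in
// maybeApplyDefaultServiceConfig, it is applied only when no resolver-provided
// config has been set on the ClientConn.
func dialWithDefaultSC() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"dns:///example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
}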
+
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+ defer cc.firstResolveEvent.Fire()
cc.mu.Lock()
- defer cc.mu.Unlock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
if cc.conns == nil {
+ cc.mu.Unlock()
return nil
}
- if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
- if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
- cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
+ if err != nil {
+ // May need to apply the initial service config in case the resolver
+ // doesn't support service configs, or doesn't provide a service config
+ // with the new addresses.
+ cc.maybeApplyDefaultServiceConfig(nil)
+
+ if cc.balancerWrapper != nil {
+ cc.balancerWrapper.resolverError(err)
}
- } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
- cc.applyServiceConfig(sc)
+
+ // No addresses are valid with err set; return early.
+ cc.mu.Unlock()
+ return balancer.ErrBadResolverState
+ }
+
+ var ret error
+ if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+ cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ // TODO: do we need to apply a failing LB policy if there is no
+ // default, per the error handling design?
+ } else {
+ if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+ cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+ } else {
+ ret = balancer.ErrBadResolverState
+ if cc.balancerWrapper == nil {
+ var err error
+ if s.ServiceConfig.Err != nil {
+ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
+ } else {
+ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
+ }
+ cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+ cc.csMgr.updateState(connectivity.TransientFailure)
+ cc.mu.Unlock()
+ return ret
+ }
+ }
}
var balCfg serviceconfig.LoadBalancingConfig
- if cc.dopts.balancerBuilder == nil {
- // Only look at balancer types and switch balancer if balancer dial
- // option is not set.
- var newBalancerName string
- if cc.sc != nil && cc.sc.lbConfig != nil {
- newBalancerName = cc.sc.lbConfig.name
- balCfg = cc.sc.lbConfig.cfg
- } else {
- var isGRPCLB bool
- for _, a := range s.Addresses {
- if a.Type == resolver.GRPCLB {
- isGRPCLB = true
- break
- }
- }
- if isGRPCLB {
- newBalancerName = grpclbName
- } else if cc.sc != nil && cc.sc.LB != nil {
- newBalancerName = *cc.sc.LB
- } else {
- newBalancerName = PickFirstBalancerName
- }
- }
- cc.switchBalancer(newBalancerName)
- } else if cc.balancerWrapper == nil {
- // Balancer dial option was set, and this is the first time handling
- // resolved addresses. Build a balancer with dopts.balancerBuilder.
- cc.curBalancerName = cc.dopts.balancerBuilder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+ if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+ balCfg = cc.sc.lbConfig.cfg
}
- cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
- return nil
+ cbn := cc.curBalancerName
+ bw := cc.balancerWrapper
+ cc.mu.Unlock()
+ if cbn != grpclbName {
+ // Filter any grpclb addresses since we don't have the grpclb balancer.
+ for i := 0; i < len(s.Addresses); {
+ if s.Addresses[i].Type == resolver.GRPCLB {
+ copy(s.Addresses[i:], s.Addresses[i+1:])
+ s.Addresses = s.Addresses[:len(s.Addresses)-1]
+ continue
+ }
+ i++
+ }
+ }
+ uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+ if ret == nil {
+ ret = uccsErr // prefer ErrBadResolverState, since any other error is
+ // currently meaningless to the caller.
+ }
+ return ret
}
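
The grpclb filtering above uses the standard in-place slice filter; restated standalone for clarity:

package example

import "google.golang.org/grpc/resolver"

// filterGRPCLB removes grpclb-typed addresses without allocating a new
// slice, preserving the order of the remaining addresses.
func filterGRPCLB(addrs []resolver.Address) []resolver.Address {
	for i := 0; i < len(addrs); {
		if addrs[i].Type == resolver.GRPCLB {
			copy(addrs[i:], addrs[i+1:])
			addrs = addrs[:len(addrs)-1]
			continue
		}
		i++
	}
	return addrs
}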
// switchBalancer starts the switching from current balancer to the balancer
@@ -632,7 +703,7 @@
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
}
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
@@ -640,7 +711,7 @@
}
// TODO(bar switching) send updates to all balancer wrappers when balancer
// gracefully switching is supported.
- cc.balancerWrapper.handleSubConnStateChange(sc, s)
+ cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
cc.mu.Unlock()
}
@@ -737,7 +808,7 @@
}
// Update connectivity state within the lock to prevent subsequent or
// concurrent calls from resetting the transport more than once.
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
// Start a goroutine connecting to the server asynchronously.
@@ -823,7 +894,8 @@
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
+ t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+ Ctx: ctx,
FullMethodName: method,
})
if err != nil {
@@ -832,10 +904,10 @@
return t, done, nil
}
-func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
if sc == nil {
// should never reach here.
- return fmt.Errorf("got nil pointer for service config")
+ return
}
cc.sc = sc
@@ -851,10 +923,38 @@
cc.retryThrottler.Store((*retryThrottler)(nil))
}
- return nil
+ if cc.dopts.balancerBuilder == nil {
+ // Only look at balancer types and switch balancer if balancer dial
+ // option is not set.
+ var newBalancerName string
+ if cc.sc != nil && cc.sc.lbConfig != nil {
+ newBalancerName = cc.sc.lbConfig.name
+ } else {
+ var isGRPCLB bool
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ isGRPCLB = true
+ break
+ }
+ }
+ if isGRPCLB {
+ newBalancerName = grpclbName
+ } else if cc.sc != nil && cc.sc.LB != nil {
+ newBalancerName = *cc.sc.LB
+ } else {
+ newBalancerName = PickFirstBalancerName
+ }
+ }
+ cc.switchBalancer(newBalancerName)
+ } else if cc.balancerWrapper == nil {
+ // Balancer dial option was set, and this is the first time handling
+ // resolved addresses. Build a balancer with dopts.balancerBuilder.
+ cc.curBalancerName = cc.dopts.balancerBuilder.Name()
+ cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+ }
}
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
cc.mu.RLock()
r := cc.resolverWrapper
cc.mu.RUnlock()
@@ -876,8 +976,9 @@
// This API is EXPERIMENTAL.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
- defer cc.mu.Unlock()
- for ac := range cc.conns {
+ conns := cc.conns
+ cc.mu.Unlock()
+ for ac := range conns {
ac.resetConnectBackoff()
}
}
@@ -963,7 +1064,7 @@
}
// Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
if ac.state == s {
return
}
@@ -976,7 +1077,7 @@
Severity: channelz.CtINFO,
})
}
- ac.cc.handleSubConnStateChange(ac.acbw, s)
+ ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -996,7 +1097,7 @@
func (ac *addrConn) resetTransport() {
for i := 0; ; i++ {
if i > 0 {
- ac.cc.resolveNow(resolver.ResolveNowOption{})
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
}
ac.mu.Lock()
@@ -1025,7 +1126,7 @@
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
connectDeadline := time.Now().Add(dialDuration)
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.updateConnectivityState(connectivity.Connecting, nil)
ac.transport = nil
ac.mu.Unlock()
@@ -1038,7 +1139,7 @@
ac.mu.Unlock()
return
}
- ac.updateConnectivityState(connectivity.TransientFailure)
+ ac.updateConnectivityState(connectivity.TransientFailure, err)
// Backoff.
b := ac.resetBackoff
@@ -1061,8 +1162,8 @@
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
- newTr.Close()
ac.mu.Unlock()
+ newTr.Close()
return
}
ac.curAddr = addr
@@ -1077,20 +1178,16 @@
// we restart from the top of the addr list.
<-reconnect.Done()
hcancel()
-
- // Need to reconnect after a READY, the addrConn enters
- // TRANSIENT_FAILURE.
+ // Restart connecting - the top of the loop will set the state to
+ // CONNECTING. This is against the current connectivity semantics doc;
+ // however, it allows graceful behavior for RPCs not yet dispatched -
+ // unfortunate timing would otherwise lead to the RPC failing even
+ // though the TRANSIENT_FAILURE state (called for by the doc) would be
+ // instantaneous.
//
- // This will set addrConn to TRANSIENT_FAILURE for a very short period
- // of time, and turns CONNECTING. It seems reasonable to skip this, but
- // READY-CONNECTING is not a valid transition.
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
- ac.updateConnectivityState(connectivity.TransientFailure)
- ac.mu.Unlock()
+ // Ideally we should transition to Idle here and block until there is
+ // RPC activity that leads to the balancer requesting a reconnect of
+ // the associated SubConn.
}
}
@@ -1098,6 +1195,7 @@
// first successful one. It returns the transport, the address and a Event in
// the successful case. The Event fires when the returned transport disconnects.
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+ var firstConnErr error
for _, addr := range addrs {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
@@ -1126,11 +1224,14 @@
if err == nil {
return newTr, addr, reconnect, nil
}
+ if firstConnErr == nil {
+ firstConnErr = err
+ }
ac.cc.blockingpicker.updateConnectionError(err)
}
// Couldn't connect to any address.
- return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+ return nil, resolver.Address{}, nil, firstConnErr
}
// createTransport creates a connection to addr. It returns the transport and a
@@ -1141,20 +1242,47 @@
onCloseCalled := make(chan struct{})
reconnect := grpcsync.NewEvent()
+ authority := ac.cc.authority
+ // addr.ServerName takes precedence over the ClientConn authority, if present.
+ if addr.ServerName != "" {
+ authority = addr.ServerName
+ }
+
target := transport.TargetInfo{
Addr: addr.Addr,
Metadata: addr.Metadata,
- Authority: ac.cc.authority,
+ Authority: authority,
}
+ once := sync.Once{}
onGoAway := func(r transport.GoAwayReason) {
ac.mu.Lock()
ac.adjustParams(r)
+ once.Do(func() {
+ if ac.state == connectivity.Ready {
+ // Prevent this SubConn from being used for new RPCs by setting its
+ // state to Connecting.
+ //
+ // TODO: this should be Idle when grpc-go properly supports it.
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ }
+ })
ac.mu.Unlock()
reconnect.Fire()
}
onClose := func() {
+ ac.mu.Lock()
+ once.Do(func() {
+ if ac.state == connectivity.Ready {
+ // Prevent this SubConn from being used for new RPCs by setting its
+ // state to Connecting.
+ //
+ // TODO: this should be Idle when grpc-go properly supports it.
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ }
+ })
+ ac.mu.Unlock()
close(onCloseCalled)
reconnect.Fire()
}
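
onGoAway and onClose share one sync.Once, so the Ready-to-Connecting demotion happens at most once per transport, whichever event fires first (or if both do). The pattern, restated standalone:

package example

import "sync"

// callbacks returns two event handlers that both funnel into a single
// sync.Once, so demote runs at most once no matter which fires first.
func callbacks(demote func()) (onGoAway, onClose func()) {
	once := sync.Once{}
	onGoAway = func() { once.Do(demote) }
	onClose = func() { once.Do(demote) }
	return onGoAway, onClose
}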
@@ -1176,20 +1304,18 @@
return nil, nil, err
}
- if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
- select {
- case <-time.After(connectDeadline.Sub(time.Now())):
- // We didn't get the preface in time.
- newTr.Close()
- grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
- return nil, nil, errors.New("timed out waiting for server handshake")
- case <-prefaceReceived:
- // We got the preface - huzzah! things are good.
- case <-onCloseCalled:
- // The transport has already closed - noop.
- return nil, nil, errors.New("connection closed")
- // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
- }
+ select {
+ case <-time.After(time.Until(connectDeadline)):
+ // We didn't get the preface in time.
+ newTr.Close()
+ grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
+ return nil, nil, errors.New("timed out waiting for server handshake")
+ case <-prefaceReceived:
+ // We got the preface - huzzah! things are good.
+ case <-onCloseCalled:
+ // The transport has already closed - noop.
+ return nil, nil, errors.New("connection closed")
+ // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
}
return newTr, reconnect, nil
}
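
With the envconfig.RequireHandshakeOn gate removed, the preface wait is now unconditional. The pattern, restated standalone (time.Until(d) is equivalent to the older d.Sub(time.Now())):

package example

import (
	"errors"
	"time"
)

// waitHandshake races a connect deadline against the server preface and
// transport closure, mirroring the select in createTransport above.
func waitHandshake(deadline time.Time, preface, closed <-chan struct{}) error {
	select {
	case <-time.After(time.Until(deadline)):
		return errors.New("timed out waiting for server handshake")
	case <-preface:
		return nil
	case <-closed:
		return errors.New("connection closed")
	}
}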
@@ -1210,7 +1336,7 @@
var healthcheckManagingState bool
defer func() {
if !healthcheckManagingState {
- ac.updateConnectivityState(connectivity.Ready)
+ ac.updateConnectivityState(connectivity.Ready, nil)
}
}()
@@ -1246,13 +1372,13 @@
ac.mu.Unlock()
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
}
- setConnectivityState := func(s connectivity.State) {
+ setConnectivityState := func(s connectivity.State, lastErr error) {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.transport != currentTr {
return
}
- ac.updateConnectivityState(s)
+ ac.updateConnectivityState(s, lastErr)
}
// Start the health checking stream.
go func() {
@@ -1317,8 +1443,8 @@
curTr := ac.transport
ac.transport = nil
// We have to set the state to Shutdown before anything else to prevent races
- // between setting the state and logic that waits on context cancelation / etc.
- ac.updateConnectivityState(connectivity.Shutdown)
+ // between setting the state and logic that waits on context cancellation / etc.
+ ac.updateConnectivityState(connectivity.Shutdown, nil)
ac.cancel()
ac.curAddr = resolver.Address{}
if err == errConnDrain && curTr != nil {
@@ -1341,7 +1467,7 @@
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity beng deleted, and thus prevent it from being deleted right away.
+ // the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(ac.channelzID)
}
ac.mu.Unlock()
@@ -1431,3 +1557,12 @@
// Deprecated: This error is never returned by grpc and should not be
// referenced by users.
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+ for _, rb := range cc.dopts.resolvers {
+ if cc.parsedTarget.Scheme == rb.Scheme() {
+ return rb
+ }
+ }
+ return resolver.Get(cc.parsedTarget.Scheme)
+}
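
Note that both call sites pass cc.parsedTarget.Scheme, which is also what the loop matches, so the scheme parameter and cc.parsedTarget.Scheme agree in practice. dopts.resolvers is populated by the grpc.WithResolvers dial option in grpc-go versions that carry this change; a hedged sketch with a placeholder builder and target:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
)

// dialWithResolver registers a resolver for this ClientConn only; getResolver
// consults it before the global registry (resolver.Get).
func dialWithResolver(customBuilder resolver.Builder) (*grpc.ClientConn, error) {
	return grpc.Dial("custom:///backend", grpc.WithInsecure(), grpc.WithResolvers(customBuilder))
}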