[VOL-5486] Fix deprecated dependency versions
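
Update the vendored golang.org/x/net/http2 server.go: the synctest
group/timer indirection (the group field, markNewGoroutine, now,
newTimer, afterFunc) is removed in favor of plain *time.Timer and
time.Now, newBufferedWriter no longer takes the group, ServeConn now
tolerates a nil *ServeConnOpts, handlePingTimer logs a lost PING and
increments the "conn_close_lost_ping" counter when countErrorFunc is
set, and the error-channel pool moves from a single package-level
sync.Pool to a per-Server pool (with a global fallback for Servers that
never went through ConfigureServer), since channels cannot be reused
across synctest bubbles.

For reviewers, a rough standalone sketch of that pooling pattern is
below. The names getErrChan, putErrChan and errChanPool come from the
diff; the surrounding types are simplified stand-ins, not the real
x/net/http2 ones.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // Package-level fallback pool, mirroring the global errChanPool kept
    // for Servers that were never passed through ConfigureServer.
    var globalErrChanPool = sync.Pool{
    	New: func() any { return make(chan error, 1) },
    }

    // state is a simplified stand-in for serverInternalState: the pool
    // lives per server so channels are never shared across synctest
    // bubbles in tests.
    type state struct {
    	errChanPool sync.Pool
    }

    func (s *state) getErrChan() chan error {
    	if s == nil {
    		return globalErrChanPool.Get().(chan error) // unconfigured Server
    	}
    	return s.errChanPool.Get().(chan error)
    }

    func (s *state) putErrChan(ch chan error) {
    	if s == nil {
    		globalErrChanPool.Put(ch)
    		return
    	}
    	s.errChanPool.Put(ch)
    }

    func main() {
    	s := &state{errChanPool: sync.Pool{
    		New: func() any { return make(chan error, 1) },
    	}}

    	// Handler-side cycle, as in writeDataFromHandler: borrow a channel,
    	// hand it to an asynchronous writer, wait for the result, recycle it.
    	ch := s.getErrChan()
    	go func() { ch <- nil }() // stand-in for the frame-write goroutine
    	if err := <-ch; err != nil {
    		fmt.Println("write failed:", err)
    	}
    	s.putErrChan(ch)
    }
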
Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b640deb..bdc5520 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -176,44 +176,15 @@
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -245,6 +216,27 @@
s.mu.Unlock()
}
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -257,7 +249,10 @@
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -423,6 +418,9 @@
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -438,7 +436,7 @@
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -638,11 +636,11 @@
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
- readIdleTimer timer // nil if unused
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -687,12 +685,12 @@
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +846,6 @@
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -881,7 +878,6 @@
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
- sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
- lastFrameTime := sc.srv.now()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -994,7 +990,7 @@
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
- lastFrameTime = sc.srv.now()
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1068,13 +1064,16 @@
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
if sc.pingSent {
- sc.vlogf("timeout waiting for PING response")
+ sc.logf("timeout waiting for PING response")
+ if f := sc.countErrorFunc; f != nil {
+ f("conn_close_lost_ping")
+ }
sc.conn.Close()
return
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
- now := sc.srv.now()
+ now := time.Now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
@@ -1138,10 +1137,10 @@
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1153,10 +1152,6 @@
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1164,7 +1159,7 @@
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1196,7 +1191,7 @@
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1510,7 +1505,7 @@
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -2115,7 +2110,7 @@
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2213,7 +2208,7 @@
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2402,7 +2397,6 @@
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2451,7 +2445,7 @@
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2463,7 +2457,7 @@
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2570,7 +2564,7 @@
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2699,7 +2693,7 @@
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2821,7 +2815,7 @@
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2837,9 +2831,9 @@
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2847,7 +2841,7 @@
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2863,9 +2857,9 @@
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -3144,7 +3138,7 @@
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3161,7 +3155,7 @@
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}