[VOL-5486] Fix deprecated versions by updating the vendored golang.org/x/net/http2 package
Change-Id: If0b888d6c2f33b2f415c8b03b08dc994bb3df3f4
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 6c349f3..bdc5520 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -29,6 +29,7 @@
"bufio"
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"errors"
"fmt"
@@ -49,13 +50,18 @@
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
)
const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+
+ // maxQueuedControlFrames is the maximum number of control frames like
+ // SETTINGS, PING and RST_STREAM that will be queued for writing before
+ // the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000
)
@@ -127,6 +133,22 @@
// If zero or negative, there is no timeout.
IdleTimeout time.Duration
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to a ping is not received.
+ // If zero, a default of 15 seconds is used.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which a connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ // If zero or negative, there is no timeout.
+ WriteByteTimeout time.Duration
+
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connection. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
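
For context, a minimal sketch of setting these new timeouts on an http2.Server (the durations, address, and certificate paths are illustrative):

    package main

    import (
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func main() {
        h2 := &http2.Server{
            ReadIdleTimeout:  30 * time.Second, // send a PING if no frame arrives for 30s
            PingTimeout:      15 * time.Second, // close the conn if the PING goes unanswered
            WriteByteTimeout: 10 * time.Second, // close the conn if writes stall completely
        }
        srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
        if err := http2.ConfigureServer(srv, h2); err != nil {
            panic(err)
        }
        _ = srv.ListenAndServeTLS("cert.pem", "key.pem")
    }
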
@@ -154,95 +176,15 @@
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (s *Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection >= initialWindowSize {
- return s.MaxUploadBufferPerConnection
- }
- return 1 << 20
-}
-
-func (s *Server) initialStreamRecvWindowSize() int32 {
- if s.MaxUploadBufferPerStream > 0 {
- return s.MaxUploadBufferPerStream
- }
- return 1 << 20
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-func (s *Server) maxDecoderHeaderTableSize() uint32 {
- if v := s.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (s *Server) maxEncoderHeaderTableSize() uint32 {
- if v := s.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-// maxQueuedControlFrames is the maximum number of control frames like
-// SETTINGS, PING and RST_STREAM that will be queued for writing before
-// the connection is closed to prevent memory exhaustion attacks.
-func (s *Server) maxQueuedControlFrames() int {
- // TODO: if anybody asks, add a Server field, and remember to define the
- // behavior of negative values.
- return maxQueuedControlFrames
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -274,6 +216,27 @@
s.mu.Unlock()
}
+// Global error channel pool, used for Servers that were not initialized via ConfigureServer.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -286,7 +249,10 @@
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -336,7 +302,7 @@
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
}
- protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) {
if testHookOnConn != nil {
testHookOnConn()
}
@@ -353,12 +319,31 @@
ctx = bc.BaseContext()
}
conf.ServeConn(c, &ServeConnOpts{
- Context: ctx,
- Handler: h,
- BaseConfig: hs,
+ Context: ctx,
+ Handler: h,
+ BaseConfig: hs,
+ SawClientPreface: sawClientPreface,
})
}
- s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler(hs, c, h, false)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ //
+ // A connection passed to this handler has already had the HTTP/2 preface read from it.
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ if lg := hs.ErrorLog; lg != nil {
+ lg.Print(err)
+ } else {
+ log.Print(err)
+ }
+ go c.Close()
+ return
+ }
+ protoHandler(hs, nc, h, true)
+ }
return nil
}
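
For reference, a sketch of the ServeConn path the unencrypted handler above ends up on: raw, already-accepted connections are handed straight to the http2.Server (the address and handler are illustrative; a client would need prior-knowledge HTTP/2, i.e. h2c):

    package main

    import (
        "net"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        ln, err := net.Listen("tcp", ":8080")
        if err != nil {
            panic(err)
        }
        h2 := &http2.Server{}
        handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte(r.Proto)) // "HTTP/2.0"
        })
        for {
            c, err := ln.Accept()
            if err != nil {
                return
            }
            // SawClientPreface is left false: ServeConn still has to read
            // the client preface from c, unlike the handoff above.
            go h2.ServeConn(c, &http2.ServeConnOpts{Handler: handler})
        }
    }
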
@@ -433,6 +418,9 @@
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -440,13 +428,15 @@
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
+ http1srv := opts.baseConfig()
+ conf := configFromServer(http1srv, s)
sc := &serverConn{
srv: s,
- hs: opts.baseConfig(),
+ hs: http1srv,
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -456,9 +446,12 @@
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
+ advMaxStreams: conf.MaxConcurrentStreams,
initialStreamSendWindowSize: initialWindowSize,
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize,
+ pingTimeout: conf.PingTimeout,
+ countErrorFunc: conf.CountError,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
@@ -491,15 +484,15 @@
sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
- sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
fr := NewFramer(sc.bw, c)
- if s.CountError != nil {
- fr.countError = s.CountError
+ if conf.CountError != nil {
+ fr.countError = conf.CountError
}
- fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
+ fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
@@ -532,7 +525,7 @@
// So for now, do nothing here again.
}
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@@ -569,7 +562,7 @@
opts.UpgradeRequest = nil
}
- sc.serve()
+ sc.serve(conf)
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@@ -609,6 +602,7 @@
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
+ countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@@ -628,6 +622,7 @@
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
+ initialStreamRecvWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@@ -638,9 +633,14 @@
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ pingSent bool
+ sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -655,11 +655,7 @@
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
+ return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
}
func (sc *serverConn) curOpenStreams() uint32 {
@@ -689,12 +685,12 @@
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -815,8 +811,7 @@
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
- buildCommonHeaderMapsOnce()
- cv, ok := commonCanonHeader[v]
+ cv, ok := httpcommon.CachedCanonicalHeader(v)
if ok {
return cv
}
@@ -851,7 +846,6 @@
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -884,7 +878,6 @@
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -923,7 +916,7 @@
}
}
-func (sc *serverConn) serve() {
+func (sc *serverConn) serve(conf http2Config) {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
@@ -935,20 +928,24 @@
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
+ settings := writeSettings{
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
+ }
+ if !disableExtendedConnectProtocol {
+ settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
+ }
sc.writeFrame(FrameWriteRequest{
- write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
- {SettingMaxConcurrentStreams, sc.advMaxStreams},
- {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
- {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
- },
+ write: settings,
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
@@ -964,15 +961,22 @@
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
+ if conf.SendPingTimeout > 0 {
+ sc.readIdleTimeout = conf.SendPingTimeout
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ defer sc.readIdleTimer.Stop()
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -986,6 +990,7 @@
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1017,6 +1022,8 @@
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
+ case readIdleTimerMsg:
+ sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@@ -1039,7 +1046,7 @@
// If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us
// run out of memory.
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ if sc.queuedControlFrames > maxQueuedControlFrames {
sc.vlogf("http2: too many control frames in send queue, closing connection")
return
}
@@ -1055,12 +1062,42 @@
}
}
+func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
+ if sc.pingSent {
+ sc.logf("timeout waiting for PING response")
+ if f := sc.countErrorFunc; f != nil {
+ f("conn_close_lost_ping")
+ }
+ sc.conn.Close()
+ return
+ }
+
+ pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
+ now := time.Now()
+ if pingAt.After(now) {
+ // We received frames since arming the ping timer.
+ // Reset it for the next possible timeout.
+ sc.readIdleTimer.Reset(pingAt.Sub(now))
+ return
+ }
+
+ sc.pingSent = true
+ // Ignore crypto/rand.Read errors: It generally can't fail, and the worst case if it does
+ // is that we send a PING frame containing 0s.
+ _, _ = rand.Read(sc.sentPingData[:])
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePing{data: sc.sentPingData},
+ })
+ sc.readIdleTimer.Reset(sc.pingTimeout)
+}
+
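
The countErrorFunc consulted above comes from the Server's CountError hook; a minimal sketch of feeding it into an in-process counter (the counter itself is illustrative):

    package main

    import (
        "sync"
        "sync/atomic"

        "golang.org/x/net/http2"
    )

    func main() {
        var counts sync.Map // errType -> *atomic.Int64
        h2 := &http2.Server{
            CountError: func(errType string) {
                v, _ := counts.LoadOrStore(errType, new(atomic.Int64))
                v.(*atomic.Int64).Add(1) // e.g. "conn_close_lost_ping" when a PING goes unanswered
            },
        }
        _ = h2 // pass h2 to http2.ConfigureServer, or call h2.ServeConn directly
    }
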
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
+ readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
@@ -1068,6 +1105,7 @@
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
@@ -1099,10 +1137,10 @@
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1114,10 +1152,6 @@
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1125,7 +1159,7 @@
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1157,7 +1191,7 @@
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1320,6 +1354,10 @@
sc.writingFrame = false
sc.writingFrameAsync = false
+ if res.err != nil {
+ sc.conn.Close()
+ }
+
wr := res.wr
if writeEndsStream(wr.write) {
@@ -1467,7 +1505,7 @@
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -1594,6 +1632,11 @@
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
+ if sc.pingSent && sc.sentPingData == f.Data {
+ // This is a response to a PING we sent.
+ sc.pingSent = false
+ sc.readIdleTimer.Reset(sc.readIdleTimeout)
+ }
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@@ -1757,6 +1800,9 @@
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
+ case SettingEnableConnectProtocol:
+ // Receipt of this parameter by a server does not
+ // have any impact (RFC 8441, Section 3).
default:
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
@@ -2064,7 +2110,7 @@
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2160,9 +2206,9 @@
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.init(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2182,19 +2228,25 @@
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
- rp := requestParam{
- method: f.PseudoValue("method"),
- scheme: f.PseudoValue("scheme"),
- authority: f.PseudoValue("authority"),
- path: f.PseudoValue("path"),
+ rp := httpcommon.ServerRequestParam{
+ Method: f.PseudoValue("method"),
+ Scheme: f.PseudoValue("scheme"),
+ Authority: f.PseudoValue("authority"),
+ Path: f.PseudoValue("path"),
+ Protocol: f.PseudoValue("protocol"),
}
- isConnect := rp.method == "CONNECT"
+ // Extended CONNECT is disabled, so we should not see :protocol.
+ if disableExtendedConnectProtocol && rp.Protocol != "" {
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
+ }
+
+ isConnect := rp.Method == "CONNECT"
if isConnect {
- if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
- } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+ } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@@ -2208,12 +2260,16 @@
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
- rp.header = make(http.Header)
+ header := make(http.Header)
+ rp.Header = header
for _, hf := range f.RegularFields() {
- rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
}
- if rp.authority == "" {
- rp.authority = rp.header.Get("Host")
+ if rp.Authority == "" {
+ rp.Authority = header.Get("Host")
+ }
+ if rp.Protocol != "" {
+ header.Set(":protocol", rp.Protocol)
}
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
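
Because the :protocol value is copied into the request headers above, a handler can recognize an extended CONNECT (RFC 8441) request roughly as follows (a sketch; the tunnel plumbing is omitted):

    package main

    import "net/http"

    func extendedConnect(w http.ResponseWriter, r *http.Request) {
        if r.Method == "CONNECT" && r.Header.Get(":protocol") == "websocket" {
            // Extended CONNECT also carries :scheme and :path, so r.URL is
            // populated like a normal request.
            w.WriteHeader(http.StatusOK)
            // ... tunnel WebSocket frames over r.Body and w ...
            return
        }
        http.Error(w, "extended CONNECT required", http.StatusBadRequest)
    }
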
@@ -2222,7 +2278,7 @@
}
bodyOpen := !f.StreamEnded()
if bodyOpen {
- if vv, ok := rp.header["Content-Length"]; ok {
+ if vv, ok := rp.Header["Content-Length"]; ok {
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
req.ContentLength = int64(cl)
} else {
@@ -2238,83 +2294,38 @@
return rw, req, nil
}
-type requestParam struct {
- method string
- scheme, authority, path string
- header http.Header
-}
-
-func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
sc.serveG.check()
var tlsState *tls.ConnectionState // nil if not scheme https
- if rp.scheme == "https" {
+ if rp.Scheme == "https" {
tlsState = sc.tlsState
}
- needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
- if needsContinue {
- rp.header.Del("Expect")
- }
- // Merge Cookie headers into one "; "-delimited value.
- if cookies := rp.header["Cookie"]; len(cookies) > 1 {
- rp.header.Set("Cookie", strings.Join(cookies, "; "))
- }
-
- // Setup Trailers
- var trailer http.Header
- for _, v := range rp.header["Trailer"] {
- for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(textproto.TrimString(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- // Bogus. (copy of http1 rules)
- // Ignore.
- default:
- if trailer == nil {
- trailer = make(http.Header)
- }
- trailer[key] = nil
- }
- }
- }
- delete(rp.header, "Trailer")
-
- var url_ *url.URL
- var requestURI string
- if rp.method == "CONNECT" {
- url_ = &url.URL{Host: rp.authority}
- requestURI = rp.authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(rp.path)
- if err != nil {
- return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
- }
- requestURI = rp.path
+ res := httpcommon.NewServerRequest(rp)
+ if res.InvalidReason != "" {
+ return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
}
body := &requestBody{
conn: sc,
stream: st,
- needsContinue: needsContinue,
+ needsContinue: res.NeedsContinue,
}
- req := &http.Request{
- Method: rp.method,
- URL: url_,
+ req := (&http.Request{
+ Method: rp.Method,
+ URL: res.URL,
RemoteAddr: sc.remoteAddrStr,
- Header: rp.header,
- RequestURI: requestURI,
+ Header: rp.Header,
+ RequestURI: res.RequestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
- Host: rp.authority,
+ Host: rp.Authority,
Body: body,
- Trailer: trailer,
- }
- req = req.WithContext(st.ctx)
-
+ Trailer: res.Trailer,
+ }).WithContext(st.ctx)
rw := sc.newResponseWriter(st, req)
return rw, req, nil
}
@@ -2386,7 +2397,6 @@
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2435,7 +2445,7 @@
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2447,7 +2457,7 @@
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2554,7 +2564,7 @@
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2683,7 +2693,7 @@
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2805,7 +2815,7 @@
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2821,9 +2831,9 @@
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2831,7 +2841,7 @@
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2847,14 +2857,19 @@
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
}
+func (w *responseWriter) EnableFullDuplex() error {
+ // We always support full duplex responses, so this is a no-op.
+ return nil
+}
+
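
SetReadDeadline, SetWriteDeadline, and the new EnableFullDuplex are the methods that net/http's ResponseController reaches through interface assertions; a handler sketch (the deadlines are illustrative):

    package main

    import (
        "io"
        "net/http"
        "time"
    )

    func echo(w http.ResponseWriter, r *http.Request) {
        rc := http.NewResponseController(w)
        // Allow reading the request body after the response has started.
        if err := rc.EnableFullDuplex(); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        _ = rc.SetReadDeadline(time.Now().Add(10 * time.Second))
        _ = rc.SetWriteDeadline(time.Now().Add(30 * time.Second))
        io.Copy(w, r.Body) // stream the request body back as it arrives
    }
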
func (w *responseWriter) Flush() {
w.FlushError()
}
@@ -3123,7 +3138,7 @@
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3140,7 +3155,7 @@
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}
@@ -3204,12 +3219,12 @@
// we start in "half closed (remote)" for simplicity.
// See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
- rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
- method: msg.method,
- scheme: msg.url.Scheme,
- authority: msg.url.Host,
- path: msg.url.RequestURI(),
- header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
+ Method: msg.method,
+ Scheme: msg.url.Scheme,
+ Authority: msg.url.Host,
+ Path: msg.url.RequestURI(),
+ Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.
@@ -3301,7 +3316,7 @@
if sc == nil || sc.srv == nil {
return err
}
- f := sc.srv.CountError
+ f := sc.countErrorFunc
if f == nil {
return err
}