[VOL-5486] Fix deprecated versions

Update the vendored golang.org/x/net packages (context, http2,
internal/httpcommon, internal/socks, trace) to a newer upstream revision
that no longer relies on deprecated APIs.

Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index db1c95f..d3cb951 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -6,7 +6,7 @@
 // cancellation signals, and other request-scoped values across API boundaries
 // and between processes.
 // As of Go 1.7 this package is available in the standard library under the
-// name [context], and migrating to it can be done automatically with [go fix].
+// name [context].
 //
 // Incoming requests to a server should create a [Context], and outgoing
 // calls to servers should accept a Context. The chain of function
@@ -38,8 +38,6 @@
 //
 // See https://go.dev/blog/context for example code for a server that uses
 // Contexts.
-//
-// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
 package context
 
 import (
@@ -51,36 +49,37 @@
 // API boundaries.
 //
 // Context's methods may be called by multiple goroutines simultaneously.
+//
+//go:fix inline
 type Context = context.Context
 
 // Canceled is the error returned by [Context.Err] when the context is canceled
 // for some reason other than its deadline passing.
+//
+//go:fix inline
 var Canceled = context.Canceled
 
 // DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
 // due to its deadline passing.
+//
+//go:fix inline
 var DeadlineExceeded = context.DeadlineExceeded
 
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline. It is typically used by the main function,
 // initialization, and tests, and as the top-level Context for incoming
 // requests.
-func Background() Context {
-	return background
-}
+//
+//go:fix inline
+func Background() Context { return context.Background() }
 
 // TODO returns a non-nil, empty Context. Code should use context.TODO when
 // it's unclear which Context to use or it is not yet available (because the
 // surrounding function has not yet been extended to accept a Context
 // parameter).
-func TODO() Context {
-	return todo
-}
-
-var (
-	background = context.Background()
-	todo       = context.TODO()
-)
+//
+//go:fix inline
+func TODO() Context { return context.TODO() }
 
 // A CancelFunc tells an operation to abandon its work.
 // A CancelFunc does not wait for the work to stop.
@@ -95,6 +94,8 @@
 //
 // Canceling this context releases resources associated with it, so code should
 // call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
 func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
 	return context.WithCancel(parent)
 }
@@ -108,6 +109,8 @@
 //
 // Canceling this context releases resources associated with it, so code should
 // call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
 func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
 	return context.WithDeadline(parent, d)
 }
@@ -122,6 +125,8 @@
 //		defer cancel()  // releases resources if slowOperation completes before timeout elapses
 //		return slowOperation(ctx)
 //	}
+//
+//go:fix inline
 func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
 	return context.WithTimeout(parent, timeout)
 }
@@ -139,6 +144,8 @@
 // interface{}, context keys often have concrete type
 // struct{}. Alternatively, exported context key variables' static
 // type should be a pointer or interface.
+//
+//go:fix inline
 func WithValue(parent Context, key, val interface{}) Context {
 	return context.WithValue(parent, key, val)
 }
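
Illustration (not part of the patch): with the rewrite above, the vendored context package is a pure forwarding layer — Context is a type alias and every constructor delegates to the standard library, with //go:fix inline hints so callers can be migrated mechanically. A minimal sketch of the interoperability this gives:

package main

import (
	"context"
	"fmt"
	"time"

	netcontext "golang.org/x/net/context"
)

func main() {
	// netcontext.WithTimeout forwards to context.WithTimeout, and the
	// returned value is a plain context.Context (type alias).
	ctx, cancel := netcontext.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	<-ctx.Done()
	// netcontext.DeadlineExceeded is the same value as context.DeadlineExceeded.
	fmt.Println(ctx.Err())
}
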
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index ca645d9..8a7a89d 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@
 //   - If the resulting value is zero or out of range, use a default.
 type http2Config struct {
 	MaxConcurrentStreams         uint32
+	StrictMaxConcurrentRequests  bool
 	MaxDecoderHeaderTableSize    uint32
 	MaxEncoderHeaderTableSize    uint32
 	MaxReadFrameSize             uint32
@@ -55,7 +56,7 @@
 		PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
 		CountError:                   h2.CountError,
 	}
-	fillNetHTTPServerConfig(&conf, h1)
+	fillNetHTTPConfig(&conf, h1.HTTP2)
 	setConfigDefaults(&conf, true)
 	return conf
 }
@@ -64,12 +65,13 @@
 // (the net/http Transport).
 func configFromTransport(h2 *Transport) http2Config {
 	conf := http2Config{
-		MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
-		MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
-		MaxReadFrameSize:          h2.MaxReadFrameSize,
-		SendPingTimeout:           h2.ReadIdleTimeout,
-		PingTimeout:               h2.PingTimeout,
-		WriteByteTimeout:          h2.WriteByteTimeout,
+		StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+		MaxEncoderHeaderTableSize:   h2.MaxEncoderHeaderTableSize,
+		MaxDecoderHeaderTableSize:   h2.MaxDecoderHeaderTableSize,
+		MaxReadFrameSize:            h2.MaxReadFrameSize,
+		SendPingTimeout:             h2.ReadIdleTimeout,
+		PingTimeout:                 h2.PingTimeout,
+		WriteByteTimeout:            h2.WriteByteTimeout,
 	}
 
 	// Unlike most config fields, where out-of-range values revert to the default,
@@ -81,7 +83,7 @@
 	}
 
 	if h2.t1 != nil {
-		fillNetHTTPTransportConfig(&conf, h2.t1)
+		fillNetHTTPConfig(&conf, h2.t1.HTTP2)
 	}
 	setConfigDefaults(&conf, false)
 	return conf
@@ -120,3 +122,48 @@
 	const typicalHeaders = 10   // conservative
 	return n + typicalHeaders*perFieldOverhead
 }
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+	if h2 == nil {
+		return
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if http2ConfigStrictMaxConcurrentRequests(h2) {
+		conf.StrictMaxConcurrentRequests = true
+	}
+	if h2.MaxEncoderHeaderTableSize != 0 {
+		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+	}
+	if h2.MaxDecoderHeaderTableSize != 0 {
+		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if h2.MaxReadFrameSize != 0 {
+		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+	}
+	if h2.MaxReceiveBufferPerConnection != 0 {
+		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+	}
+	if h2.MaxReceiveBufferPerStream != 0 {
+		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+	}
+	if h2.SendPingTimeout != 0 {
+		conf.SendPingTimeout = h2.SendPingTimeout
+	}
+	if h2.PingTimeout != 0 {
+		conf.PingTimeout = h2.PingTimeout
+	}
+	if h2.WriteByteTimeout != 0 {
+		conf.WriteByteTimeout = h2.WriteByteTimeout
+	}
+	if h2.PermitProhibitedCipherSuites {
+		conf.PermitProhibitedCipherSuites = true
+	}
+	if h2.CountError != nil {
+		conf.CountError = h2.CountError
+	}
+}
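
Illustration (not part of the patch): fillNetHTTPConfig now reads the net/http HTTP2Config struct directly (available since Go 1.24), so settings placed on http.Server.HTTP2 flow into the internal http2Config above. A sketch, with placeholder certificate paths:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			MaxReadFrameSize:     1 << 20,
			SendPingTimeout:      30 * time.Second,
			PingTimeout:          15 * time.Second,
			WriteByteTimeout:     15 * time.Second,
		},
	}
	// cert.pem and key.pem are placeholders.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
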
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index 5b516c5..0000000
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
-	fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
-	fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
-	if h2 == nil {
-		return
-	}
-	if h2.MaxConcurrentStreams != 0 {
-		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
-	}
-	if h2.MaxEncoderHeaderTableSize != 0 {
-		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
-	}
-	if h2.MaxDecoderHeaderTableSize != 0 {
-		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
-	}
-	if h2.MaxConcurrentStreams != 0 {
-		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
-	}
-	if h2.MaxReadFrameSize != 0 {
-		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
-	}
-	if h2.MaxReceiveBufferPerConnection != 0 {
-		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
-	}
-	if h2.MaxReceiveBufferPerStream != 0 {
-		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
-	}
-	if h2.SendPingTimeout != 0 {
-		conf.SendPingTimeout = h2.SendPingTimeout
-	}
-	if h2.PingTimeout != 0 {
-		conf.PingTimeout = h2.PingTimeout
-	}
-	if h2.WriteByteTimeout != 0 {
-		conf.WriteByteTimeout = h2.WriteByteTimeout
-	}
-	if h2.PermitProhibitedCipherSuites {
-		conf.PermitProhibitedCipherSuites = true
-	}
-	if h2.CountError != nil {
-		conf.CountError = h2.CountError
-	}
-}
diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 0000000..b4373fe
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+	"net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+	return false
+}
diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 0000000..6b071c1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+	"net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+	return h2.StrictMaxConcurrentRequests
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c..0000000
--- a/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 81faec7..93bcaab 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -39,7 +39,7 @@
 	FrameContinuation FrameType = 0x9
 )
 
-var frameName = map[FrameType]string{
+var frameNames = [...]string{
 	FrameData:         "DATA",
 	FrameHeaders:      "HEADERS",
 	FramePriority:     "PRIORITY",
@@ -53,10 +53,10 @@
 }
 
 func (t FrameType) String() string {
-	if s, ok := frameName[t]; ok {
-		return s
+	if int(t) < len(frameNames) {
+		return frameNames[t]
 	}
-	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t)
 }
 
 // Flags is a bitmask of HTTP/2 flags.
@@ -124,7 +124,7 @@
 // might be 0).
 type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
 
-var frameParsers = map[FrameType]frameParser{
+var frameParsers = [...]frameParser{
 	FrameData:         parseDataFrame,
 	FrameHeaders:      parseHeadersFrame,
 	FramePriority:     parsePriorityFrame,
@@ -138,8 +138,8 @@
 }
 
 func typeFrameParser(t FrameType) frameParser {
-	if f := frameParsers[t]; f != nil {
-		return f
+	if int(t) < len(frameParsers) {
+		return frameParsers[t]
 	}
 	return parseUnknownFrame
 }
@@ -225,6 +225,11 @@
 	},
 }
 
+func invalidHTTP1LookingFrameHeader() FrameHeader {
+	fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
+	return fh
+}
+
 // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
 // Most users should use Framer.ReadFrame instead.
 func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
@@ -342,7 +347,7 @@
 func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
 	// Write the FrameHeader.
 	f.wbuf = append(f.wbuf[:0],
-		0, // 3 bytes of length, filled in in endWrite
+		0, // 3 bytes of length, filled in endWrite
 		0,
 		0,
 		byte(ftype),
@@ -503,10 +508,16 @@
 		return nil, err
 	}
 	if fh.Length > fr.maxReadSize {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+		}
 		return nil, ErrFrameTooLarge
 	}
 	payload := fr.getReadBuf(fh.Length)
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+		}
 		return nil, err
 	}
 	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
@@ -1141,6 +1152,15 @@
 	PriorityParam
 }
 
+var defaultRFC9218Priority = PriorityParam{
+	incremental: 0,
+	urgency:     3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
 // PriorityParam are the stream prioritzation parameters.
 type PriorityParam struct {
 	// StreamDep is a 31-bit stream identifier for the
@@ -1156,6 +1176,20 @@
 	// the spec, "Add one to the value to obtain a weight between
 	// 1 and 256."
 	Weight uint8
+
+	// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+	// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+	// priority. The default is 3."
+	urgency uint8
+
+	// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+	// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+	// incrementally, i.e., provide some meaningful output as chunks of the
+	// response arrive."
+	//
+	// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+	// avoid unnecessary type conversions and because either type takes 1 byte.
+	incremental uint8
 }
 
 func (p PriorityParam) IsZero() bool {
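
Illustration (not part of the patch): the map-to-array change above preserves FrameType.String's behavior — known types index the frameNames array, and anything out of range still falls back to the UNKNOWN_FRAME_TYPE_%d form. A quick sketch:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	fmt.Println(http2.FrameData)       // DATA
	fmt.Println(http2.FrameType(0x42)) // UNKNOWN_FRAME_TYPE_66
}
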
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f..9921ca0 100644
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@
 	"runtime"
 	"strconv"
 	"sync"
+	"sync/atomic"
 )
 
 var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
 
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
 type goroutineLock uint64
 
 func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return 0
 	}
 	return goroutineLock(curGoroutineID())
 }
 
 func (g goroutineLock) check() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@
 }
 
 func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() == uint64(g) {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 6c18ea2..105fe12 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -11,13 +11,10 @@
 // requires Go 1.6 or later)
 //
 // See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
 package http2 // import "golang.org/x/net/http2"
 
 import (
 	"bufio"
-	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
@@ -37,7 +34,6 @@
 	VerboseLogs    bool
 	logFrameWrites bool
 	logFrameReads  bool
-	inTests        bool
 
 	// Enabling extended CONNECT by causes browsers to attempt to use
 	// WebSockets-over-HTTP/2. This results in problems when the server's websocket
@@ -257,15 +253,13 @@
 // idle memory usage with many connections.
 type bufferedWriter struct {
 	_           incomparable
-	group       synctestGroupInterface // immutable
-	conn        net.Conn               // immutable
-	bw          *bufio.Writer          // non-nil when data is buffered
-	byteTimeout time.Duration          // immutable, WriteByteTimeout
+	conn        net.Conn      // immutable
+	bw          *bufio.Writer // non-nil when data is buffered
+	byteTimeout time.Duration // immutable, WriteByteTimeout
 }
 
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
 	return &bufferedWriter{
-		group:       group,
 		conn:        conn,
 		byteTimeout: timeout,
 	}
@@ -316,24 +310,18 @@
 type bufferedWriterTimeoutWriter bufferedWriter
 
 func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
-	return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+	return writeWithByteTimeout(w.conn, w.byteTimeout, p)
 }
 
 // writeWithByteTimeout writes to conn.
 // If more than timeout passes without any bytes being written to the connection,
 // the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
 	if timeout <= 0 {
 		return conn.Write(p)
 	}
 	for {
-		var now time.Time
-		if group == nil {
-			now = time.Now()
-		} else {
-			now = group.Now()
-		}
-		conn.SetWriteDeadline(now.Add(timeout))
+		conn.SetWriteDeadline(time.Now().Add(timeout))
 		nn, err := conn.Write(p[n:])
 		n += nn
 		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -419,14 +407,3 @@
 // makes that struct also non-comparable, and generally doesn't add
 // any size (as long as it's first).
 type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
-	Join()
-	Now() time.Time
-	NewTimer(d time.Duration) timer
-	AfterFunc(d time.Duration, f func()) timer
-	ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
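
Illustration (not part of the patch): with the synctest group plumbing removed, writeWithByteTimeout uses real time.Now deadlines. The sketch below restates that stall-based timeout in isolation (writeWithStallTimeout is a hypothetical name) and drives it over a net.Pipe whose reader stops early; the call returns the bytes that did get through plus a timeout error.

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"time"
)

// writeWithStallTimeout extends the deadline after every partial write;
// only a complete stall fails the call (same loop shape as in the patch).
func writeWithStallTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
	for {
		conn.SetWriteDeadline(time.Now().Add(timeout))
		nn, werr := conn.Write(p[n:])
		n += nn
		if n == len(p) || nn == 0 || !errors.Is(werr, os.ErrDeadlineExceeded) {
			return n, werr
		}
	}
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()
	go func() {
		buf := make([]byte, 4)
		client.Read(buf) // accept a few bytes, then stop reading
	}()
	n, err := writeWithStallTimeout(server, 200*time.Millisecond, make([]byte, 64))
	fmt.Println(n, err) // partial count plus an i/o timeout error
}
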
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b640deb..bdc5520 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -176,44 +176,15 @@
 	// so that we don't embed a Mutex in this struct, which will make the
 	// struct non-copyable, which might break some callers.
 	state *serverInternalState
-
-	// Synchronization group used for testing.
-	// Outside of tests, this is nil.
-	group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
-	if s.group != nil {
-		s.group.Join()
-	}
-}
-
-func (s *Server) now() time.Time {
-	if s.group != nil {
-		return s.group.Now()
-	}
-	return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
-	if s.group != nil {
-		return s.group.NewTimer(d)
-	}
-	return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
-	if s.group != nil {
-		return s.group.AfterFunc(d, f)
-	}
-	return timeTimer{time.AfterFunc(d, f)}
 }
 
 type serverInternalState struct {
 	mu          sync.Mutex
 	activeConns map[*serverConn]struct{}
+
+	// Pool of error channels. This is per-Server rather than global
+	// because channels can't be reused across synctest bubbles.
+	errChanPool sync.Pool
 }
 
 func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -245,6 +216,27 @@
 	s.mu.Unlock()
 }
 
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+	New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+	if s == nil {
+		return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+	}
+	return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+	if s == nil {
+		errChanPool.Put(ch) // Server used without calling ConfigureServer
+		return
+	}
+	s.errChanPool.Put(ch)
+}
+
 // ConfigureServer adds HTTP/2 support to a net/http Server.
 //
 // The configuration conf may be nil.
@@ -257,7 +249,10 @@
 	if conf == nil {
 		conf = new(Server)
 	}
-	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+	conf.state = &serverInternalState{
+		activeConns: make(map[*serverConn]struct{}),
+		errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+	}
 	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
 		if h1.IdleTimeout != 0 {
 			h2.IdleTimeout = h1.IdleTimeout
@@ -423,6 +418,9 @@
 //
 // The opts parameter is optional. If nil, default values are used.
 func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+	if opts == nil {
+		opts = &ServeConnOpts{}
+	}
 	s.serveConn(c, opts, nil)
 }
 
@@ -438,7 +436,7 @@
 		conn:                        c,
 		baseCtx:                     baseCtx,
 		remoteAddrStr:               c.RemoteAddr().String(),
-		bw:                          newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+		bw:                          newBufferedWriter(c, conf.WriteByteTimeout),
 		handler:                     opts.handler(),
 		streams:                     make(map[uint32]*stream),
 		readFrameCh:                 make(chan readFrameResult),
@@ -638,11 +636,11 @@
 	pingSent                    bool
 	sentPingData                [8]byte
 	goAwayCode                  ErrCode
-	shutdownTimer               timer // nil until used
-	idleTimer                   timer // nil if unused
+	shutdownTimer               *time.Timer // nil until used
+	idleTimer                   *time.Timer // nil if unused
 	readIdleTimeout             time.Duration
 	pingTimeout                 time.Duration
-	readIdleTimer               timer // nil if unused
+	readIdleTimer               *time.Timer // nil if unused
 
 	// Owned by the writeFrameAsync goroutine:
 	headerWriteBuf bytes.Buffer
@@ -687,12 +685,12 @@
 	flow             outflow // limits writing from Handler to client
 	inflow           inflow  // what the client is allowed to POST/etc to us
 	state            streamState
-	resetQueued      bool  // RST_STREAM queued for write; set by sc.resetStream
-	gotTrailerHeader bool  // HEADER frame for trailers was seen
-	wroteHeaders     bool  // whether we wrote headers (not status 100)
-	readDeadline     timer // nil if unused
-	writeDeadline    timer // nil if unused
-	closeErr         error // set before cw is closed
+	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
+	gotTrailerHeader bool        // HEADER frame for trailers was seen
+	wroteHeaders     bool        // whether we wrote headers (not status 100)
+	readDeadline     *time.Timer // nil if unused
+	writeDeadline    *time.Timer // nil if unused
+	closeErr         error       // set before cw is closed
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +846,6 @@
 // consumer is done with the frame.
 // It's run on its own goroutine.
 func (sc *serverConn) readFrames() {
-	sc.srv.markNewGoroutine()
 	gate := make(chan struct{})
 	gateDone := func() { gate <- struct{}{} }
 	for {
@@ -881,7 +878,6 @@
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
 func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
-	sc.srv.markNewGoroutine()
 	var err error
 	if wd == nil {
 		err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@
 	sc.setConnState(http.StateIdle)
 
 	if sc.srv.IdleTimeout > 0 {
-		sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
 		defer sc.idleTimer.Stop()
 	}
 
 	if conf.SendPingTimeout > 0 {
 		sc.readIdleTimeout = conf.SendPingTimeout
-		sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+		sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
 		defer sc.readIdleTimer.Stop()
 	}
 
 	go sc.readFrames() // closed by defer sc.conn.Close above
 
-	settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
 	defer settingsTimer.Stop()
 
-	lastFrameTime := sc.srv.now()
+	lastFrameTime := time.Now()
 	loopNum := 0
 	for {
 		loopNum++
@@ -994,7 +990,7 @@
 		case res := <-sc.wroteFrameCh:
 			sc.wroteFrame(res)
 		case res := <-sc.readFrameCh:
-			lastFrameTime = sc.srv.now()
+			lastFrameTime = time.Now()
 			// Process any written frames before reading new frames from the client since a
 			// written frame could have triggered a new stream to be started.
 			if sc.writingFrameAsync {
@@ -1068,13 +1064,16 @@
 
 func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
 	if sc.pingSent {
-		sc.vlogf("timeout waiting for PING response")
+		sc.logf("timeout waiting for PING response")
+		if f := sc.countErrorFunc; f != nil {
+			f("conn_close_lost_ping")
+		}
 		sc.conn.Close()
 		return
 	}
 
 	pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
-	now := sc.srv.now()
+	now := time.Now()
 	if pingAt.After(now) {
 		// We received frames since arming the ping timer.
 		// Reset it for the next possible timeout.
@@ -1138,10 +1137,10 @@
 			errc <- nil
 		}
 	}()
-	timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
 	defer timer.Stop()
 	select {
-	case <-timer.C():
+	case <-timer.C:
 		return errPrefaceTimeout
 	case err := <-errc:
 		if err == nil {
@@ -1153,10 +1152,6 @@
 	}
 }
 
-var errChanPool = sync.Pool{
-	New: func() interface{} { return make(chan error, 1) },
-}
-
 var writeDataPool = sync.Pool{
 	New: func() interface{} { return new(writeData) },
 }
@@ -1164,7 +1159,7 @@
 // writeDataFromHandler writes DATA response frames from a handler on
 // the given stream.
 func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
-	ch := errChanPool.Get().(chan error)
+	ch := sc.srv.state.getErrChan()
 	writeArg := writeDataPool.Get().(*writeData)
 	*writeArg = writeData{stream.id, data, endStream}
 	err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1196,7 +1191,7 @@
 			return errStreamClosed
 		}
 	}
-	errChanPool.Put(ch)
+	sc.srv.state.putErrChan(ch)
 	if frameWriteDone {
 		writeDataPool.Put(writeArg)
 	}
@@ -1510,7 +1505,7 @@
 
 func (sc *serverConn) shutDownIn(d time.Duration) {
 	sc.serveG.check()
-	sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
 }
 
 func (sc *serverConn) resetStream(se StreamError) {
@@ -2115,7 +2110,7 @@
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout > 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
 	return sc.scheduleHandler(id, rw, req, handler)
@@ -2213,7 +2208,7 @@
 	st.flow.add(sc.initialStreamSendWindowSize)
 	st.inflow.init(sc.initialStreamRecvWindowSize)
 	if sc.hs.WriteTimeout > 0 {
-		st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
 
 	sc.streams[id] = st
@@ -2402,7 +2397,6 @@
 
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
-	sc.srv.markNewGoroutine()
 	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
@@ -2451,7 +2445,7 @@
 		// waiting for this frame to be written, so an http.Flush mid-handler
 		// writes out the correct value of keys, before a handler later potentially
 		// mutates it.
-		errc = errChanPool.Get().(chan error)
+		errc = sc.srv.state.getErrChan()
 	}
 	if err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  headerData,
@@ -2463,7 +2457,7 @@
 	if errc != nil {
 		select {
 		case err := <-errc:
-			errChanPool.Put(errc)
+			sc.srv.state.putErrChan(errc)
 			return err
 		case <-sc.doneServing:
 			return errClientDisconnected
@@ -2570,7 +2564,7 @@
 	if err == io.EOF {
 		b.sawEOF = true
 	}
-	if b.conn == nil && inTests {
+	if b.conn == nil {
 		return
 	}
 	b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2699,7 +2693,7 @@
 		var date string
 		if _, ok := rws.snapHeader["Date"]; !ok {
 			// TODO(bradfitz): be faster here, like net/http? measure.
-			date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+			date = time.Now().UTC().Format(http.TimeFormat)
 		}
 
 		for _, v := range rws.snapHeader["Trailer"] {
@@ -2821,7 +2815,7 @@
 
 func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
 	st := w.rws.stream
-	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
 		// If we're setting a deadline in the past, reset the stream immediately
 		// so writes after SetWriteDeadline returns will fail.
 		st.onReadTimeout()
@@ -2837,9 +2831,9 @@
 		if deadline.IsZero() {
 			st.readDeadline = nil
 		} else if st.readDeadline == nil {
-			st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
 		} else {
-			st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+			st.readDeadline.Reset(deadline.Sub(time.Now()))
 		}
 	})
 	return nil
@@ -2847,7 +2841,7 @@
 
 func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
 	st := w.rws.stream
-	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
 		// If we're setting a deadline in the past, reset the stream immediately
 		// so writes after SetWriteDeadline returns will fail.
 		st.onWriteTimeout()
@@ -2863,9 +2857,9 @@
 		if deadline.IsZero() {
 			st.writeDeadline = nil
 		} else if st.writeDeadline == nil {
-			st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
 		} else {
-			st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+			st.writeDeadline.Reset(deadline.Sub(time.Now()))
 		}
 	})
 	return nil
@@ -3144,7 +3138,7 @@
 		method: opts.Method,
 		url:    u,
 		header: cloneHeader(opts.Header),
-		done:   errChanPool.Get().(chan error),
+		done:   sc.srv.state.getErrChan(),
 	}
 
 	select {
@@ -3161,7 +3155,7 @@
 	case <-st.cw:
 		return errStreamClosed
 	case err := <-msg.done:
-		errChanPool.Put(msg.done)
+		sc.srv.state.putErrChan(msg.done)
 		return err
 	}
 }
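
Illustration (not part of the patch): the error-channel pool now lives in serverInternalState, which is created by ConfigureServer; serving a connection on a bare &http2.Server{} takes the "Server used without calling ConfigureServer" branch above and falls back to the package-level pool. A sketch of the configured path, with placeholder certificate paths:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443"}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
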
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b..0000000
--- a/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
-	C() <-chan time.Time
-	Reset(d time.Duration) bool
-	Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
-	*time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f26356b..be759b6 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -193,50 +193,6 @@
 
 type transportTestHooks struct {
 	newclientconn func(*ClientConn)
-	group         synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
-	if t != nil && t.transportTestHooks != nil {
-		t.transportTestHooks.group.Join()
-	}
-}
-
-func (t *Transport) now() time.Time {
-	if t != nil && t.transportTestHooks != nil {
-		return t.transportTestHooks.group.Now()
-	}
-	return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
-	if t != nil && t.transportTestHooks != nil {
-		return t.now().Sub(when)
-	}
-	return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.NewTimer(d)
-	}
-	return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.AfterFunc(d, f)
-	}
-	return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
-	}
-	return context.WithTimeout(ctx, d)
 }
 
 func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +322,7 @@
 	readerErr  error         // set before readerDone is closed
 
 	idleTimeout time.Duration // or 0 for never
-	idleTimer   timer
+	idleTimer   *time.Timer
 
 	mu               sync.Mutex // guards following
 	cond             *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -399,6 +355,7 @@
 	readIdleTimeout             time.Duration
 	pingTimeout                 time.Duration
 	extendedConnectAllowed      bool
+	strictMaxConcurrentStreams  bool
 
 	// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
 	// gRPC strictly limits the number of PING frames that it will receive.
@@ -534,14 +491,12 @@
 	cs.reqBodyClosed = make(chan struct{})
 	reqBodyClosed := cs.reqBodyClosed
 	go func() {
-		cs.cc.t.markNewGoroutine()
 		cs.reqBody.Close()
 		close(reqBodyClosed)
 	}()
 }
 
 type stickyErrWriter struct {
-	group   synctestGroupInterface
 	conn    net.Conn
 	timeout time.Duration
 	err     *error
@@ -551,7 +506,7 @@
 	if *sew.err != nil {
 		return 0, *sew.err
 	}
-	n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+	n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
 	*sew.err = err
 	return n, err
 }
@@ -650,9 +605,9 @@
 				backoff := float64(uint(1) << (uint(retry) - 1))
 				backoff += backoff * (0.1 * mathrand.Float64())
 				d := time.Second * time.Duration(backoff)
-				tm := t.newTimer(d)
+				tm := time.NewTimer(d)
 				select {
-				case <-tm.C():
+				case <-tm.C:
 					t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
 					continue
 				case <-req.Context().Done():
@@ -699,6 +654,7 @@
 	errClientConnUnusable       = errors.New("http2: client conn not usable")
 	errClientConnNotEstablished = errors.New("http2: client conn could not be established")
 	errClientConnGotGoAway      = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+	errClientConnForceClosed    = errors.New("http2: client connection force closed via ClientConn.Close")
 )
 
 // shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -829,7 +785,8 @@
 		initialWindowSize:           65535,    // spec default
 		initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
 		maxConcurrentStreams:        initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
-		peerMaxHeaderListSize:       0xffffffffffffffff,          // "infinite", per spec. Use 2^64-1 instead.
+		strictMaxConcurrentStreams:  conf.StrictMaxConcurrentRequests,
+		peerMaxHeaderListSize:       0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
 		streams:                     make(map[uint32]*clientStream),
 		singleUse:                   singleUse,
 		seenSettingsChan:            make(chan struct{}),
@@ -838,14 +795,11 @@
 		pingTimeout:                 conf.PingTimeout,
 		pings:                       make(map[[8]byte]chan struct{}),
 		reqHeaderMu:                 make(chan struct{}, 1),
-		lastActive:                  t.now(),
+		lastActive:                  time.Now(),
 	}
-	var group synctestGroupInterface
 	if t.transportTestHooks != nil {
-		t.markNewGoroutine()
 		t.transportTestHooks.newclientconn(cc)
 		c = cc.tconn
-		group = t.group
 	}
 	if VerboseLogs {
 		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +811,6 @@
 	// TODO: adjust this writer size to account for frame size +
 	// MTU + crypto/tls record padding.
 	cc.bw = bufio.NewWriter(stickyErrWriter{
-		group:   group,
 		conn:    c,
 		timeout: conf.WriteByteTimeout,
 		err:     &cc.werr,
@@ -906,7 +859,7 @@
 	// Start the idle timer after the connection is fully initialized.
 	if d := t.idleConnTimeout(); d != 0 {
 		cc.idleTimeout = d
-		cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
 	}
 
 	go cc.readLoop()
@@ -917,7 +870,7 @@
 	pingTimeout := cc.pingTimeout
 	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
 	// trigger the healthCheck again if there is no frame received.
-	ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
 	defer cancel()
 	cc.vlogf("http2: Transport sending health check")
 	err := cc.Ping(ctx)
@@ -1067,7 +1020,7 @@
 		return
 	}
 	var maxConcurrentOkay bool
-	if cc.t.StrictMaxConcurrentStreams {
+	if cc.strictMaxConcurrentStreams {
 		// We'll tell the caller we can take a new request to
 		// prevent the caller from dialing a new TCP
 		// connection, but then we'll block later before
@@ -1120,7 +1073,7 @@
 	// times are compared based on their wall time. We don't want
 	// to reuse a connection that's been sitting idle during
 	// VM/laptop suspend if monotonic time was also frozen.
-	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
 }
 
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1186,7 +1139,6 @@
 	done := make(chan struct{})
 	cancelled := false // guarded by cc.mu
 	go func() {
-		cc.t.markNewGoroutine()
 		cc.mu.Lock()
 		defer cc.mu.Unlock()
 		for {
@@ -1257,8 +1209,7 @@
 //
 // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
 func (cc *ClientConn) Close() error {
-	err := errors.New("http2: client connection force closed via ClientConn.Close")
-	cc.closeForError(err)
+	cc.closeForError(errClientConnForceClosed)
 	return nil
 }
 
@@ -1427,7 +1378,6 @@
 //
 // It sends the request and performs post-request cleanup (closing Request.Body, etc.).
 func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
-	cs.cc.t.markNewGoroutine()
 	err := cs.writeRequest(req, streamf)
 	cs.cleanupWriteRequest(err)
 }
@@ -1558,9 +1508,9 @@
 	var respHeaderTimer <-chan time.Time
 	var respHeaderRecv chan struct{}
 	if d := cc.responseHeaderTimeout(); d != 0 {
-		timer := cc.t.newTimer(d)
+		timer := time.NewTimer(d)
 		defer timer.Stop()
-		respHeaderTimer = timer.C()
+		respHeaderTimer = timer.C
 		respHeaderRecv = cs.respHeaderRecv
 	}
 	// Wait until the peer half-closes its end of the stream,
@@ -1753,7 +1703,7 @@
 			// Return a fatal error which aborts the retry loop.
 			return errClientConnNotEstablished
 		}
-		cc.lastActive = cc.t.now()
+		cc.lastActive = time.Now()
 		if cc.closed || !cc.canTakeNewRequestLocked() {
 			return errClientConnUnusable
 		}
@@ -2092,10 +2042,10 @@
 	if len(cc.streams) != slen-1 {
 		panic("forgetting unknown stream id")
 	}
-	cc.lastActive = cc.t.now()
+	cc.lastActive = time.Now()
 	if len(cc.streams) == 0 && cc.idleTimer != nil {
 		cc.idleTimer.Reset(cc.idleTimeout)
-		cc.lastIdle = cc.t.now()
+		cc.lastIdle = time.Now()
 	}
 	// Wake up writeRequestBody via clientStream.awaitFlowControl and
 	// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2071,6 @@
 
 // readLoop runs in its own goroutine and reads and dispatches frames.
 func (cc *ClientConn) readLoop() {
-	cc.t.markNewGoroutine()
 	rl := &clientConnReadLoop{cc: cc}
 	defer rl.cleanup()
 	cc.readerErr = rl.run()
@@ -2188,9 +2137,9 @@
 	if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
 		unusedWaitTime = cc.idleTimeout
 	}
-	idleTime := cc.t.now().Sub(cc.lastActive)
+	idleTime := time.Now().Sub(cc.lastActive)
 	if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
-		cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+		cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
 			cc.t.connPool().MarkDead(cc)
 		})
 	} else {
@@ -2250,9 +2199,9 @@
 	cc := rl.cc
 	gotSettings := false
 	readIdleTimeout := cc.readIdleTimeout
-	var t timer
+	var t *time.Timer
 	if readIdleTimeout != 0 {
-		t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
 	}
 	for {
 		f, err := cc.fr.ReadFrame()
@@ -2998,7 +2947,6 @@
 	var pingError error
 	errc := make(chan struct{})
 	go func() {
-		cc.t.markNewGoroutine()
 		cc.wmu.Lock()
 		defer cc.wmu.Unlock()
 		if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3228,7 +3176,7 @@
 	cc.mu.Lock()
 	ci.WasIdle = len(cc.streams) == 0 && reused
 	if ci.WasIdle && !cc.lastActive.IsZero() {
-		ci.IdleTime = cc.t.timeSince(cc.lastActive)
+		ci.IdleTime = time.Since(cc.lastActive)
 	}
 	cc.mu.Unlock()
 
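
Illustration (not part of the patch): the per-connection strictMaxConcurrentStreams flag replaces the read of cc.t.StrictMaxConcurrentStreams, and the StrictMaxConcurrentRequests config value is populated from the existing Transport option. A sketch of that option on the caller side:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{
		// When a connection is at the server's SETTINGS_MAX_CONCURRENT_STREAMS
		// limit, block new requests on it rather than dialing another connection.
		StrictMaxConcurrentStreams: true,
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
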
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index cc893ad..4d3890f 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@
 	// PusherID is zero if the stream was initiated by the client. Otherwise,
 	// PusherID names the stream that pushed the newly opened stream.
 	PusherID uint32
+	// priority is used to set the priority of the newly opened stream.
+	priority PriorityParam
 }
 
 // FrameWriteRequest is a request to write a frame.
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
similarity index 77%
rename from vendor/golang.org/x/net/http2/writesched_priority.go
rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
index f678333..6d24d6a 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -11,7 +11,7 @@
 )
 
 // RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
 
 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
 type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@
 		}
 	}
 
-	ws := &priorityWriteScheduler{
-		nodes:                make(map[uint32]*priorityNode),
+	ws := &priorityWriteSchedulerRFC7540{
+		nodes:                make(map[uint32]*priorityNodeRFC7540),
 		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
 		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
 		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@
 	return ws
 }
 
-type priorityNodeState int
+type priorityNodeStateRFC7540 int
 
 const (
-	priorityNodeOpen priorityNodeState = iota
-	priorityNodeClosed
-	priorityNodeIdle
+	priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+	priorityNodeClosedRFC7540
+	priorityNodeIdleRFC7540
 )
 
-// priorityNode is a node in an HTTP/2 priority tree.
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
 // Each node is associated with a single stream ID.
 // See RFC 7540, Section 5.3.
-type priorityNode struct {
-	q            writeQueue        // queue of pending frames to write
-	id           uint32            // id of the stream, or 0 for the root of the tree
-	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
-	state        priorityNodeState // open | closed | idle
-	bytes        int64             // number of bytes written by this node, or 0 if closed
-	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
+type priorityNodeRFC7540 struct {
+	q            writeQueue               // queue of pending frames to write
+	id           uint32                   // id of the stream, or 0 for the root of the tree
+	weight       uint8                    // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeStateRFC7540 // open | closed | idle
+	bytes        int64                    // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64                    // sum(node.bytes) of all nodes in this subtree
 
 	// These links form the priority tree.
-	parent     *priorityNode
-	kids       *priorityNode // start of the kids list
-	prev, next *priorityNode // doubly-linked list of siblings
+	parent     *priorityNodeRFC7540
+	kids       *priorityNodeRFC7540 // start of the kids list
+	prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
 }
 
-func (n *priorityNode) setParent(parent *priorityNode) {
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
 	if n == parent {
 		panic("setParent to self")
 	}
@@ -141,7 +141,7 @@
 	}
 }
 
-func (n *priorityNode) addBytes(b int64) {
+func (n *priorityNodeRFC7540) addBytes(b int64) {
 	n.bytes += b
 	for ; n != nil; n = n.parent {
 		n.subtreeBytes += b
@@ -154,7 +154,7 @@
 //
 // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
 // if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
 	if !n.q.empty() && f(n, openParent) {
 		return true
 	}
@@ -165,7 +165,7 @@
 	// Don't consider the root "open" when updating openParent since
 	// we can't send data frames on the root stream (only control frames).
 	if n.id != 0 {
-		openParent = openParent || (n.state == priorityNodeOpen)
+		openParent = openParent || (n.state == priorityNodeOpenRFC7540)
 	}
 
 	// Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@
 		*tmp = append(*tmp, n.kids)
 		n.kids.setParent(nil)
 	}
-	sort.Sort(sortPriorityNodeSiblings(*tmp))
+	sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
 	for i := len(*tmp) - 1; i >= 0; i-- {
 		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
 	}
@@ -207,11 +207,11 @@
 	return false
 }
 
-type sortPriorityNodeSiblings []*priorityNode
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
 
-func (z sortPriorityNodeSiblings) Len() int      { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+func (z sortPriorityNodeSiblingsRFC7540) Len() int      { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
 	// Prefer the subtree that has sent fewer bytes relative to its weight.
 	// See sections 5.3.2 and 5.3.4.
 	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
@@ -225,13 +225,13 @@
 	return bi/bk <= wi/wk
 }
 
-type priorityWriteScheduler struct {
+type priorityWriteSchedulerRFC7540 struct {
 	// root is the root of the priority tree, where root.id = 0.
 	// The root queues control frames that are not associated with any stream.
-	root priorityNode
+	root priorityNodeRFC7540
 
 	// nodes maps stream ids to priority tree nodes.
-	nodes map[uint32]*priorityNode
+	nodes map[uint32]*priorityNodeRFC7540
 
 	// maxID is the maximum stream id in nodes.
 	maxID uint32
@@ -239,7 +239,7 @@
 	// lists of nodes that have been closed or are idle, but are kept in
 	// the tree for improved prioritization. When the lengths exceed either
 	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
-	closedNodes, idleNodes []*priorityNode
+	closedNodes, idleNodes []*priorityNodeRFC7540
 
 	// From the config.
 	maxClosedNodesInTree int
@@ -248,19 +248,19 @@
 	enableWriteThrottle  bool
 
 	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
-	tmp []*priorityNode
+	tmp []*priorityNodeRFC7540
 
 	// pool of empty queues for reuse.
 	queuePool writeQueuePool
 }
 
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
 	// The stream may be currently idle but cannot be opened or closed.
 	if curr := ws.nodes[streamID]; curr != nil {
-		if curr.state != priorityNodeIdle {
+		if curr.state != priorityNodeIdleRFC7540 {
 			panic(fmt.Sprintf("stream %d already opened", streamID))
 		}
-		curr.state = priorityNodeOpen
+		curr.state = priorityNodeOpenRFC7540
 		return
 	}
 
@@ -272,11 +272,11 @@
 	if parent == nil {
 		parent = &ws.root
 	}
-	n := &priorityNode{
+	n := &priorityNodeRFC7540{
 		q:      *ws.queuePool.get(),
 		id:     streamID,
-		weight: priorityDefaultWeight,
-		state:  priorityNodeOpen,
+		weight: priorityDefaultWeightRFC7540,
+		state:  priorityNodeOpenRFC7540,
 	}
 	n.setParent(parent)
 	ws.nodes[streamID] = n
@@ -285,19 +285,19 @@
 	}
 }
 
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
 	if streamID == 0 {
 		panic("violation of WriteScheduler interface: cannot close stream 0")
 	}
 	if ws.nodes[streamID] == nil {
 		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
 	}
-	if ws.nodes[streamID].state != priorityNodeOpen {
+	if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
 		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
 	}
 
 	n := ws.nodes[streamID]
-	n.state = priorityNodeClosed
+	n.state = priorityNodeClosedRFC7540
 	n.addBytes(-n.bytes)
 
 	q := n.q
@@ -310,7 +310,7 @@
 	}
 }
 
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
 	if streamID == 0 {
 		panic("adjustPriority on root")
 	}
@@ -324,11 +324,11 @@
 			return
 		}
 		ws.maxID = streamID
-		n = &priorityNode{
+		n = &priorityNodeRFC7540{
 			q:      *ws.queuePool.get(),
 			id:     streamID,
-			weight: priorityDefaultWeight,
-			state:  priorityNodeIdle,
+			weight: priorityDefaultWeightRFC7540,
+			state:  priorityNodeIdleRFC7540,
 		}
 		n.setParent(&ws.root)
 		ws.nodes[streamID] = n
@@ -340,7 +340,7 @@
 	parent := ws.nodes[priority.StreamDep]
 	if parent == nil {
 		n.setParent(&ws.root)
-		n.weight = priorityDefaultWeight
+		n.weight = priorityDefaultWeightRFC7540
 		return
 	}
 
@@ -381,8 +381,8 @@
 	n.weight = priority.Weight
 }
 
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
-	var n *priorityNode
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+	var n *priorityNodeRFC7540
 	if wr.isControl() {
 		n = &ws.root
 	} else {
@@ -401,8 +401,8 @@
 	n.q.push(wr)
 }
 
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
-	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
 		limit := int32(math.MaxInt32)
 		if openParent {
 			limit = ws.writeThrottleLimit
@@ -428,7 +428,7 @@
 	return wr, ok
 }
 
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
 	if maxSize == 0 {
 		return
 	}
@@ -442,7 +442,7 @@
 	*list = append(*list, n)
 }
 
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
 	for n.kids != nil {
 		n.kids.setParent(n.parent)
 	}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
new file mode 100644
index 0000000..9b5b880
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+)
+
+type streamMetadata struct {
+	location *writeQueue
+	priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+	// control contains control frames (SETTINGS, PING, etc.).
+	control writeQueue
+
+	// heads contain the head of a circular list of streams.
+	// We put these heads within a nested array that represents urgency and
+	// incremental, as defined in
+	// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+	// 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+	heads [8][2]*writeQueue
+
+	// streams contains a mapping between each stream ID and their metadata, so
+	// we can quickly locate them when needing to, for example, adjust their
+	// priority.
+	streams map[uint32]streamMetadata
+
+	// queuePool are empty queues for reuse.
+	queuePool writeQueuePool
+
+	// prioritizeIncremental is used to determine whether we should prioritize
+	// incremental streams or not, when urgency is the same in a given Pop()
+	// call.
+	prioritizeIncremental bool
+}
+
+func newPriorityWriteSchedulerRFC9128() WriteScheduler {
+	ws := &priorityWriteSchedulerRFC9218{
+		streams: make(map[uint32]streamMetadata),
+	}
+	return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+	if ws.streams[streamID].location != nil {
+		panic(fmt.Errorf("stream %d already opened", streamID))
+	}
+	q := ws.queuePool.get()
+	ws.streams[streamID] = streamMetadata{
+		location: q,
+		priority: opt.priority,
+	}
+
+	u, i := opt.priority.urgency, opt.priority.incremental
+	if ws.heads[u][i] == nil {
+		ws.heads[u][i] = q
+		q.next = q
+		q.prev = q
+	} else {
+		// Queues are stored in a ring.
+		// Insert the new stream before ws.head, putting it at the end of the list.
+		q.prev = ws.heads[u][i].prev
+		q.next = ws.heads[u][i]
+		q.prev.next = q
+		q.next.prev = q
+	}
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+	metadata := ws.streams[streamID]
+	q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+	if q == nil {
+		return
+	}
+	if q.next == q {
+		// This was the only open stream.
+		ws.heads[u][i] = nil
+	} else {
+		q.prev.next = q.next
+		q.next.prev = q.prev
+		if ws.heads[u][i] == q {
+			ws.heads[u][i] = q.next
+		}
+	}
+	delete(ws.streams, streamID)
+	ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+	metadata := ws.streams[streamID]
+	q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+	if q == nil {
+		return
+	}
+
+	// Remove stream from current location.
+	if q.next == q {
+		// This was the only open stream.
+		ws.heads[u][i] = nil
+	} else {
+		q.prev.next = q.next
+		q.next.prev = q.prev
+		if ws.heads[u][i] == q {
+			ws.heads[u][i] = q.next
+		}
+	}
+
+	// Insert stream to the new queue.
+	u, i = priority.urgency, priority.incremental
+	if ws.heads[u][i] == nil {
+		ws.heads[u][i] = q
+		q.next = q
+		q.prev = q
+	} else {
+		// Queues are stored in a ring.
+		// Insert the new stream before ws.head, putting it at the end of the list.
+		q.prev = ws.heads[u][i].prev
+		q.next = ws.heads[u][i]
+		q.prev.next = q
+		q.next.prev = q
+	}
+
+	// Update the metadata.
+	ws.streams[streamID] = streamMetadata{
+		location: q,
+		priority: priority,
+	}
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+	if wr.isControl() {
+		ws.control.push(wr)
+		return
+	}
+	q := ws.streams[wr.StreamID()].location
+	if q == nil {
+		// This is a closed stream.
+		// wr should not be a HEADERS or DATA frame.
+		// We push the request onto the control queue.
+		if wr.DataSize() > 0 {
+			panic("add DATA on non-open stream")
+		}
+		ws.control.push(wr)
+		return
+	}
+	q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+	// Control and RST_STREAM frames first.
+	if !ws.control.empty() {
+		return ws.control.shift(), true
+	}
+
+	// On the next Pop(), we want to prioritize incremental if we prioritized
+	// non-incremental request of the same urgency this time. Vice-versa.
+	// i.e. when there are incremental and non-incremental requests at the same
+	// priority, we give 50% of our bandwidth to the incremental ones in
+	// aggregate and 50% to the first non-incremental one (since
+	// non-incremental streams do not use round-robin writes).
+	ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+	// Always prioritize lowest u (i.e. highest urgency level).
+	for u := range ws.heads {
+		for i := range ws.heads[u] {
+			// When we want to prioritize incremental, we try to pop i=true
+			// first before i=false when u is the same.
+			if ws.prioritizeIncremental {
+				i = (i + 1) % 2
+			}
+			q := ws.heads[u][i]
+			if q == nil {
+				continue
+			}
+			for {
+				if wr, ok := q.consume(math.MaxInt32); ok {
+					if i == 1 {
+						// For incremental streams, we update head to q.next so
+						// we can round-robin between multiple streams that can
+						// immediately benefit from partial writes.
+						ws.heads[u][i] = q.next
+					} else {
+						// For non-incremental streams, we try to finish one to
+						// completion rather than doing round-robin. However,
+						// we update head here so that if q.consume() is !ok
+						// (e.g. the stream has no more frame to consume), head
+						// is updated to the next q that has frames to consume
+						// on future iterations. This way, we do not prioritize
+						// writing to unavailable stream on next Pop() calls,
+						// preventing head-of-line blocking.
+						ws.heads[u][i] = q
+					}
+					return wr, true
+				}
+				q = q.next
+				if q == ws.heads[u][i] {
+					break
+				}
+			}
+
+		}
+	}
+	return FrameWriteRequest{}, false
+}
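
Illustration (not part of the patch): a simplified, self-contained model of the bucket walk in Pop() above — urgency buckets are scanned from u=0 (highest priority) to u=7, and the prioritizeIncremental toggle alternates which of the two i buckets at a given urgency is tried first on successive calls. The stream labels are made up.

package main

import "fmt"

func main() {
	// heads[u][i] mirrors ws.heads: a non-empty string marks a bucket with
	// a stream ready to write.
	var heads [8][2]string
	heads[3][0] = "stream 5 (u=3, non-incremental)"
	heads[3][1] = "stream 7 (u=3, incremental)"
	heads[6][0] = "stream 9 (u=6, non-incremental)"

	prioritizeIncremental := false
	pop := func() string {
		prioritizeIncremental = !prioritizeIncremental
		for u := range heads {
			for i := range heads[u] {
				if prioritizeIncremental {
					i = (i + 1) % 2 // try the incremental bucket first
				}
				if heads[u][i] != "" {
					return heads[u][i]
				}
			}
		}
		return "<idle>"
	}
	fmt.Println(pop()) // stream 7: incremental wins the first call at u=3
	fmt.Println(pop()) // stream 5: the toggle flips, non-incremental goes next
}
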
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe863..737cff9 100644
--- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@
 }
 
 // newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
 // like SETTINGS and PING over DATA frames.
 // When there are no control frames to send, it performs a round-robin
 // selection from the ready streams.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
index 4b70553..1e10f89 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@
 	DefaultUserAgent string
 }
 
-// EncodeHeadersParam is the result of EncodeHeaders.
+// EncodeHeadersResult is the result of EncodeHeaders.
 type EncodeHeadersResult struct {
 	HasBody     bool
 	HasTrailers bool
@@ -399,7 +399,7 @@
 
 	// If the request should be rejected, this is a short string suitable for passing
 	// to the http2 package's CountError function.
-	// It might be a bit odd to return errors this way rather than returing an error,
+	// It might be a bit odd to return errors this way rather than returning an error,
 	// but this ensures we don't forget to include a CountError reason.
 	InvalidReason string
 }
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
index 84fcc32..8eedb84 100644
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -297,7 +297,7 @@
 		b = append(b, up.Username...)
 		b = append(b, byte(len(up.Password)))
 		b = append(b, up.Password...)
-		// TODO(mikio): handle IO deadlines and cancelation if
+		// TODO(mikio): handle IO deadlines and cancellation if
 		// necessary
 		if _, err := rw.Write(b); err != nil {
 			return err
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
index c646a69..3aaffdd 100644
--- a/vendor/golang.org/x/net/trace/events.go
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -508,7 +508,7 @@
 	<tr class="first">
 		<td class="when">{{$el.When}}</td>
 		<td class="elapsed">{{$el.ElapsedTime}}</td>
-		<td>{{$el.Title}}
+		<td>{{$el.Title}}</td>
 	</tr>
 	{{if $.Expanded}}
 	<tr>