[VOL-5486] Upgrade library versions

Change-Id: I8b4e88699e03f44ee13e467867f45ae3f0a63c4b
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
new file mode 100644
index 0000000..f2936e1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
@@ -0,0 +1,3 @@
+# SDK Trace
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 0000000..6966ed8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,414 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/internal/env"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+	DefaultMaxQueueSize       = 2048
+	DefaultScheduleDelay      = 5000
+	DefaultExportTimeout      = 30000
+	DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a
+// BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+	// MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+	// queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
+	// The default value of MaxQueueSize is 2048.
+	MaxQueueSize int
+
+	// BatchTimeout is the maximum duration for constructing a batch. The
+	// processor forcefully sends available spans when the timeout is reached.
+	// The default value of BatchTimeout is 5000 msec.
+	BatchTimeout time.Duration
+
+	// ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+	// is reached, the export will be cancelled.
+	// The default value of ExportTimeout is 30000 msec.
+	ExportTimeout time.Duration
+
+	// MaxExportBatchSize is the maximum number of spans to process in a single batch.
+	// If there is more than one batch's worth of spans then it processes multiple
+	// batches of spans one after the other without any delay.
+	// The default value of MaxExportBatchSize is 512.
+	MaxExportBatchSize int
+
+	// BlockOnQueueFull blocks the OnEnd() and OnStart() methods if the queue is
+	// full AND BlockOnQueueFull is set to true.
+	// Blocking option should be used carefully as it can severely affect the performance of an
+	// application.
+	BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a SpanExporter when complete.
+type batchSpanProcessor struct {
+	e SpanExporter
+	o BatchSpanProcessorOptions
+
+	queue   chan ReadOnlySpan
+	dropped uint32
+
+	batch      []ReadOnlySpan
+	batchMutex sync.Mutex
+	timer      *time.Timer
+	stopWait   sync.WaitGroup
+	stopOnce   sync.Once
+	stopCh     chan struct{}
+	stopped    atomic.Bool
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+	maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+	maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+	if maxExportBatchSize > maxQueueSize {
+		if DefaultMaxExportBatchSize > maxQueueSize {
+			maxExportBatchSize = maxQueueSize
+		} else {
+			maxExportBatchSize = DefaultMaxExportBatchSize
+		}
+	}
+
+	o := BatchSpanProcessorOptions{
+		BatchTimeout:       time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+		ExportTimeout:      time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+		MaxQueueSize:       maxQueueSize,
+		MaxExportBatchSize: maxExportBatchSize,
+	}
+	for _, opt := range options {
+		opt(&o)
+	}
+	bsp := &batchSpanProcessor{
+		e:      exporter,
+		o:      o,
+		batch:  make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+		timer:  time.NewTimer(o.BatchTimeout),
+		queue:  make(chan ReadOnlySpan, o.MaxQueueSize),
+		stopCh: make(chan struct{}),
+	}
+
+	bsp.stopWait.Add(1)
+	go func() {
+		defer bsp.stopWait.Done()
+		bsp.processQueue()
+		bsp.drainQueue()
+	}()
+
+	return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+	// Do not enqueue spans after Shutdown.
+	if bsp.stopped.Load() {
+		return
+	}
+
+	// Do not enqueue spans if we are just going to drop them.
+	if bsp.e == nil {
+		return
+	}
+	bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	bsp.stopOnce.Do(func() {
+		bsp.stopped.Store(true)
+		wait := make(chan struct{})
+		go func() {
+			close(bsp.stopCh)
+			bsp.stopWait.Wait()
+			if bsp.e != nil {
+				if err := bsp.e.Shutdown(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+			close(wait)
+		}()
+		// Wait until the wait group is done or the context is cancelled
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	})
+	return err
+}
+
+type forceFlushSpan struct {
+	ReadOnlySpan
+	flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+	return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+	// Interrupt if context is already canceled.
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
+	// Do nothing after Shutdown.
+	if bsp.stopped.Load() {
+		return nil
+	}
+
+	var err error
+	if bsp.e != nil {
+		flushCh := make(chan struct{})
+		if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
+			select {
+			case <-bsp.stopCh:
+				// The batchSpanProcessor is Shutdown.
+				return nil
+			case <-flushCh:
+				// Processed any items in queue prior to ForceFlush being called
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+
+		wait := make(chan error, 1)
+		go func() {
+			wait <- bsp.exportSpans(ctx)
+		}()
+		// Wait until the export is finished or the context is cancelled/timed out
+		select {
+		case err = <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	}
+	return err
+}
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxQueueSize = size
+	}
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxExportBatchSize = size
+	}
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held span (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BatchTimeout = delay
+	}
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.ExportTimeout = timeout
+	}
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BlockOnQueueFull = true
+	}
+}
+
+// exportSpans is a subroutine of processing and draining the queue.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+	bsp.timer.Reset(bsp.o.BatchTimeout)
+
+	bsp.batchMutex.Lock()
+	defer bsp.batchMutex.Unlock()
+
+	if bsp.o.ExportTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout"))
+		defer cancel()
+	}
+
+	if l := len(bsp.batch); l > 0 {
+		global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+		err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+		// A new batch is always created after exporting, even if the batch failed to be exported.
+		//
+		// It is up to the exporter to implement any type of retry logic if a batch is failing
+		// to be exported, since it is specific to the protocol and backend being sent to.
+		clear(bsp.batch) // Erase elements to let GC collect objects
+		bsp.batch = bsp.batch[:0]
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// processQueue removes spans from the `queue` channel until the processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize,
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+	defer bsp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-bsp.stopCh:
+			return
+		case <-bsp.timer.C:
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-bsp.queue:
+			if ffs, ok := sd.(forceFlushSpan); ok {
+				close(ffs.flushed)
+				continue
+			}
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+			if shouldExport {
+				if !bsp.timer.Stop() {
+					// Handle both GODEBUG=asynctimerchan=[0|1] properly.
+					select {
+					case <-bsp.timer.C:
+					default:
+					}
+				}
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue awaits any caller that had added to bsp.stopWait
+// to finish enqueuing, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-bsp.queue:
+			if _, ok := sd.(forceFlushSpan); ok {
+				// Ignore flush requests as they are not valid spans.
+				continue
+			}
+
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			// There are no more enqueued spans. Make final export.
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+			return
+		}
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+	ctx := context.TODO()
+	if bsp.o.BlockOnQueueFull {
+		bsp.enqueueBlockOnQueueFull(ctx, sd)
+	} else {
+		bsp.enqueueDrop(ctx, sd)
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	default:
+		atomic.AddUint32(&bsp.dropped, 1)
+	}
+	return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type         string
+		SpanExporter SpanExporter
+		Config       BatchSpanProcessorOptions
+	}{
+		Type:         "BatchSpanProcessor",
+		SpanExporter: bsp.e,
+		Config:       bsp.o,
+	}
+}
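
For context, a minimal sketch of how the exported batch-processor options above are typically combined (illustrative only, not part of the vendored patch; it assumes the go.opentelemetry.io/otel/exporters/stdout/stdouttrace module as a stand-in SpanExporter):

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// stdouttrace is only a stand-in exporter for this sketch.
	exp, err := stdouttrace.New()
	if err != nil {
		panic(err)
	}

	// Override the defaults (2048 queue, 512 batch, 5 s schedule delay,
	// 30 s export timeout) with explicit limits.
	bsp := sdktrace.NewBatchSpanProcessor(exp,
		sdktrace.WithMaxQueueSize(4096),
		sdktrace.WithMaxExportBatchSize(256),
		sdktrace.WithBatchTimeout(2*time.Second),
		sdktrace.WithExportTimeout(10*time.Second),
	)

	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "work")
	span.End()
}
```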
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 0000000..1f60524
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
new file mode 100644
index 0000000..60a7ed1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Event is a thing that happened during a Span's lifetime.
+type Event struct {
+	// Name is the name of this event
+	Name string
+
+	// Attributes describe the aspects of the event.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+
+	// Time at which this event was recorded.
+	Time time.Time
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
new file mode 100644
index 0000000..8c308dd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"slices"
+	"sync"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// evictedQueue is a FIFO queue with a configurable capacity.
+type evictedQueue[T any] struct {
+	queue          []T
+	capacity       int
+	droppedCount   int
+	logDroppedMsg  string
+	logDroppedOnce sync.Once
+}
+
+func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
+	// Do not pre-allocate queue, do this lazily.
+	return evictedQueue[Event]{
+		capacity:      capacity,
+		logDroppedMsg: "limit reached: dropping trace trace.Event",
+	}
+}
+
+func newEvictedQueueLink(capacity int) evictedQueue[Link] {
+	// Do not pre-allocate queue, do this lazily.
+	return evictedQueue[Link]{
+		capacity:      capacity,
+		logDroppedMsg: "limit reached: dropping trace trace.Link",
+	}
+}
+
+// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
+// queued value will be discarded and the drop count incremented.
+func (eq *evictedQueue[T]) add(value T) {
+	if eq.capacity == 0 {
+		eq.droppedCount++
+		eq.logDropped()
+		return
+	}
+
+	if eq.capacity > 0 && len(eq.queue) == eq.capacity {
+		// Drop first-in while avoiding allocating more capacity to eq.queue.
+		copy(eq.queue[:eq.capacity-1], eq.queue[1:])
+		eq.queue = eq.queue[:eq.capacity-1]
+		eq.droppedCount++
+		eq.logDropped()
+	}
+	eq.queue = append(eq.queue, value)
+}
+
+func (eq *evictedQueue[T]) logDropped() {
+	eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
+}
+
+// copy returns a copy of the evictedQueue.
+func (eq *evictedQueue[T]) copy() []T {
+	return slices.Clone(eq.queue)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 0000000..c8d3fb7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"encoding/binary"
+	"math/rand/v2"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewIDs returns a new trace and span ID.
+	NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+	NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct{}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+	sid := trace.SpanID{}
+	for {
+		binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
+		if sid.IsValid() {
+			break
+		}
+	}
+	return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+	tid := trace.TraceID{}
+	sid := trace.SpanID{}
+	for {
+		binary.NativeEndian.PutUint64(tid[:8], rand.Uint64())
+		binary.NativeEndian.PutUint64(tid[8:], rand.Uint64())
+		if tid.IsValid() {
+			break
+		}
+	}
+	for {
+		binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
+		if sid.IsValid() {
+			break
+		}
+	}
+	return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+	return &randomIDGenerator{}
+}
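
The IDGenerator interface above is the extension point for supplying custom trace and span IDs via WithIDGenerator. A hedged sketch of a generator backed by crypto/rand (the cryptoIDGenerator type is hypothetical, not part of the SDK):

```go
package main

import (
	"context"
	crand "crypto/rand"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// cryptoIDGenerator draws IDs from crypto/rand instead of math/rand/v2.
type cryptoIDGenerator struct{}

func (cryptoIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	var sid trace.SpanID
	for !tid.IsValid() {
		_, _ = crand.Read(tid[:])
	}
	for !sid.IsValid() {
		_, _ = crand.Read(sid[:])
	}
	return tid, sid
}

func (cryptoIDGenerator) NewSpanID(ctx context.Context, _ trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	for !sid.IsValid() {
		_, _ = crand.Read(sid[:])
	}
	return sid
}

func main() {
	tp := sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(cryptoIDGenerator{}))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "op")
	span.End()
}
```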
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 0000000..c03bdc9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext trace.SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 0000000..0e2a2e7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,504 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+	defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig holds the configuration for a TracerProvider.
+type tracerProviderConfig struct {
+	// processors contains the collection of SpanProcessors that form the
+	// processing pipeline for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider are called at the start
+	// and end of a Span's lifecycle, in the order they are
+	// registered.
+	processors []SpanProcessor
+
+	// sampler is the default sampler used when creating new spans.
+	sampler Sampler
+
+	// idGenerator is used to generate all Span and Trace IDs when needed.
+	idGenerator IDGenerator
+
+	// spanLimits defines the attribute, event, and link limits for spans.
+	spanLimits SpanLimits
+
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+	return struct {
+		SpanProcessors  []SpanProcessor
+		SamplerType     string
+		IDGeneratorType string
+		SpanLimits      SpanLimits
+		Resource        *resource.Resource
+	}{
+		SpanProcessors:  cfg.processors,
+		SamplerType:     fmt.Sprintf("%T", cfg.sampler),
+		IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+		SpanLimits:      cfg.spanLimits,
+		Resource:        cfg.resource,
+	}
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+	embedded.TracerProvider
+
+	mu             sync.Mutex
+	namedTracer    map[instrumentation.Scope]*tracer
+	spanProcessors atomic.Pointer[spanProcessorStates]
+
+	isShutdown atomic.Bool
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the TracerProvider.
+	sampler     Sampler
+	idGenerator IDGenerator
+	spanLimits  SpanLimits
+	resource    *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+//   - a ParentBased(AlwaysSample) Sampler
+//   - a random number IDGenerator
+//   - the resource.Default() Resource
+//   - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+	o := tracerProviderConfig{
+		spanLimits: NewSpanLimits(),
+	}
+	o = applyTracerProviderEnvConfigs(o)
+
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+
+	o = ensureValidTracerProviderConfig(o)
+
+	tp := &TracerProvider{
+		namedTracer: make(map[instrumentation.Scope]*tracer),
+		sampler:     o.sampler,
+		idGenerator: o.idGenerator,
+		spanLimits:  o.spanLimits,
+		resource:    o.resource,
+	}
+	global.Info("TracerProvider created", "config", o)
+
+	spss := make(spanProcessorStates, 0, len(o.processors))
+	for _, sp := range o.processors {
+		spss = append(spss, newSpanProcessorState(sp))
+	}
+	tp.spanProcessors.Store(&spss)
+
+	return tp
+}
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist it is created, otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, defaultTracerName is used instead.
+//
+// This method is safe to be called concurrently.
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	// This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
+	if p.isShutdown.Load() {
+		return noop.NewTracerProvider().Tracer(name, opts...)
+	}
+	c := trace.NewTracerConfig(opts...)
+	if name == "" {
+		name = defaultTracerName
+	}
+	is := instrumentation.Scope{
+		Name:       name,
+		Version:    c.InstrumentationVersion(),
+		SchemaURL:  c.SchemaURL(),
+		Attributes: c.InstrumentationAttributes(),
+	}
+
+	t, ok := func() (trace.Tracer, bool) {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		// Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
+		// after the first check above but before we acquired the mutex.
+		if p.isShutdown.Load() {
+			return noop.NewTracerProvider().Tracer(name, opts...), true
+		}
+		t, ok := p.namedTracer[is]
+		if !ok {
+			t = &tracer{
+				provider:             p,
+				instrumentationScope: is,
+			}
+			p.namedTracer[is] = t
+		}
+		return t, ok
+	}()
+	if !ok {
+		// This code is outside the mutex to not hold the lock while calling third party logging code:
+		// - That code may do slow things like I/O, which would prolong the duration the lock is held,
+		//   slowing down all tracing consumers.
+		// - Logging code may be instrumented with tracing and deadlock because it could try
+		//   acquiring the same non-reentrant mutex.
+		global.Info(
+			"Tracer created",
+			"name",
+			name,
+			"version",
+			is.Version,
+			"schemaURL",
+			is.SchemaURL,
+			"attributes",
+			is.Attributes,
+		)
+	}
+	return t
+}
+
+// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
+	// This check prevents calls during a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+
+	current := p.getSpanProcessors()
+	newSPS := make(spanProcessorStates, 0, len(current)+1)
+	newSPS = append(newSPS, current...)
+	newSPS = append(newSPS, newSpanProcessorState(sp))
+	p.spanProcessors.Store(&newSPS)
+}
+
+// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
+	// This check prevents calls during a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+	old := p.getSpanProcessors()
+	if len(old) == 0 {
+		return
+	}
+	spss := make(spanProcessorStates, len(old))
+	copy(spss, old)
+
+	// stop the span processor if it is started and remove it from the list
+	var stopOnce *spanProcessorState
+	var idx int
+	for i, sps := range spss {
+		if sps.sp == sp {
+			stopOnce = sps
+			idx = i
+		}
+	}
+	if stopOnce != nil {
+		stopOnce.state.Do(func() {
+			if err := sp.Shutdown(context.Background()); err != nil {
+				otel.Handle(err)
+			}
+		})
+	}
+	if len(spss) > 1 {
+		copy(spss[idx:], spss[idx+1:])
+	}
+	spss[len(spss)-1] = nil
+	spss = spss[:len(spss)-1]
+
+	p.spanProcessors.Store(&spss)
+}
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+	spss := p.getSpanProcessors()
+	if len(spss) == 0 {
+		return nil
+	}
+
+	for _, sps := range spss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := sps.sp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+	// This check prevents deadlocks in case of recursive shutdown.
+	if p.isShutdown.Load() {
+		return nil
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown has already been done concurrently.
+	if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+		return nil
+	}
+
+	var retErr error
+	for _, sps := range p.getSpanProcessors() {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		sps.state.Do(func() {
+			err = sps.sp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%w; %w", retErr, err)
+			}
+		}
+	}
+	p.spanProcessors.Store(&spanProcessorStates{})
+	return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+	return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+	apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+	return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computation resource usage overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+	return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+	return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.processors = append(cfg.processors, sp)
+		return cfg
+	})
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		var err error
+		cfg.resource, err = resource.Merge(resource.Environment(), r)
+		if err != nil {
+			otel.Handle(err)
+		}
+		return cfg
+	})
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if g != nil {
+			cfg.idGenerator = g
+		}
+		return cfg
+	})
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
+// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
+// and the sampler is not configured through environment variables or the environment
+// contains invalid/unsupported configuration, the TracerProvider will use a
+// ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if s != nil {
+			cfg.sampler = s
+		}
+		return cfg
+	})
+}
+
+// WithSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
+// created by a Tracer from the TracerProvider.
+//
+// If any field of sl is zero or negative it will be replaced with the default
+// value for that field.
+//
+// If this or WithRawSpanLimits are not provided, the TracerProvider will use
+// the limits defined by environment variables, or the defaults if unset.
+// Refer to the NewSpanLimits documentation for information about this
+// relationship.
+//
+// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
+// and zero limits. This option will be kept until the next major version
+// incremented release.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+	if sl.AttributeValueLengthLimit <= 0 {
+		sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+	}
+	if sl.AttributeCountLimit <= 0 {
+		sl.AttributeCountLimit = DefaultAttributeCountLimit
+	}
+	if sl.EventCountLimit <= 0 {
+		sl.EventCountLimit = DefaultEventCountLimit
+	}
+	if sl.AttributePerEventCountLimit <= 0 {
+		sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+	}
+	if sl.LinkCountLimit <= 0 {
+		sl.LinkCountLimit = DefaultLinkCountLimit
+	}
+	if sl.AttributePerLinkCountLimit <= 0 {
+		sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+	}
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = sl
+		return cfg
+	})
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = limits
+		return cfg
+	})
+}
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+	for _, opt := range tracerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+	var opts []TracerProviderOption
+
+	sampler, err := samplerFromEnv()
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	if sampler != nil {
+		opts = append(opts, WithSampler(sampler))
+	}
+
+	return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+	if cfg.sampler == nil {
+		cfg.sampler = ParentBased(AlwaysSample())
+	}
+	if cfg.idGenerator == nil {
+		cfg.idGenerator = defaultIDGenerator()
+	}
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+	return cfg
+}
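
provider.go above is the central wiring point of the SDK; a minimal end-to-end sketch of constructing and registering a TracerProvider (illustrative only; the stdouttrace exporter and the service name are assumptions, not part of this patch):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	ctx := context.Background()

	exp, err := stdouttrace.New()
	if err != nil {
		panic(err)
	}

	// WithResource merges this with resource.Environment() internally.
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("example-service"),
	)

	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(res),
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
	)
	defer func() { _ = tp.Shutdown(ctx) }()

	otel.SetTracerProvider(tp)

	_, span := otel.Tracer("example").Start(ctx, "operation")
	span.End()
}
```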
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 0000000..9b672a1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"errors"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	tracesSamplerKey    = "OTEL_TRACES_SAMPLER"
+	tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+	samplerAlwaysOn                = "always_on"
+	samplerAlwaysOff               = "always_off"
+	samplerTraceIDRatio            = "traceidratio"
+	samplerParentBasedAlwaysOn     = "parentbased_always_on"
+	samplerParsedBasedAlwaysOff    = "parentbased_always_off"
+	samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+	return "unsupported sampler: " + string(e)
+}
+
+var (
+	errNegativeTraceIDRatio       = errors.New("invalid trace ID ratio: less than 0.0")
+	errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+	parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+	return "parsing sampler argument: " + e.parseErr.Error()
+}
+
+func (e samplerArgParseError) Unwrap() error {
+	return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+	sampler, ok := os.LookupEnv(tracesSamplerKey)
+	if !ok {
+		return nil, nil
+	}
+
+	sampler = strings.ToLower(strings.TrimSpace(sampler))
+	samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+	samplerArg = strings.TrimSpace(samplerArg)
+
+	switch sampler {
+	case samplerAlwaysOn:
+		return AlwaysSample(), nil
+	case samplerAlwaysOff:
+		return NeverSample(), nil
+	case samplerTraceIDRatio:
+		if !hasSamplerArg {
+			return TraceIDRatioBased(1.0), nil
+		}
+		return parseTraceIDRatio(samplerArg)
+	case samplerParentBasedAlwaysOn:
+		return ParentBased(AlwaysSample()), nil
+	case samplerParsedBasedAlwaysOff:
+		return ParentBased(NeverSample()), nil
+	case samplerParentBasedTraceIDRatio:
+		if !hasSamplerArg {
+			return ParentBased(TraceIDRatioBased(1.0)), nil
+		}
+		ratio, err := parseTraceIDRatio(samplerArg)
+		return ParentBased(ratio), err
+	default:
+		return nil, errUnsupportedSampler(sampler)
+	}
+}
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+	v, err := strconv.ParseFloat(arg, 64)
+	if err != nil {
+		return TraceIDRatioBased(1.0), samplerArgParseError{err}
+	}
+	if v < 0.0 {
+		return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+	}
+	if v > 1.0 {
+		return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+	}
+
+	return TraceIDRatioBased(v), nil
+}
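
sampler_env.go reads OTEL_TRACES_SAMPLER and OTEL_TRACES_SAMPLER_ARG when a TracerProvider is constructed. A rough sketch of exercising that path (setting the variables in-process is only for illustration; in practice they come from the deployment environment):

```go
package main

import (
	"context"
	"os"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Equivalent to exporting these before the process starts:
	//   OTEL_TRACES_SAMPLER=parentbased_traceidratio
	//   OTEL_TRACES_SAMPLER_ARG=0.1
	os.Setenv("OTEL_TRACES_SAMPLER", "parentbased_traceidratio")
	os.Setenv("OTEL_TRACES_SAMPLER_ARG", "0.1")

	// NewTracerProvider applies the environment-derived sampler unless a
	// WithSampler option overrides it.
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```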
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 0000000..aa7b262
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,282 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ShouldSample returns a SamplingResult based on a decision made from the
+	// passed parameters.
+	ShouldSample(parameters SamplingParameters) SamplingResult
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Description returns information describing the Sampler.
+	Description() string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+	ParentContext context.Context
+	TraceID       trace.TraceID
+	Name          string
+	Kind          trace.SpanKind
+	Attributes    []attribute.KeyValue
+	Links         []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+	// Drop will not record the span and all attributes/events will be dropped.
+	Drop SamplingDecision = iota
+
+	// RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag
+	// must not be set.
+	RecordOnly
+
+	// RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag
+	// must be set.
+	RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
+type SamplingResult struct {
+	Decision   SamplingDecision
+	Attributes []attribute.KeyValue
+	Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+	traceIDUpperBound uint64
+	description       string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
+	if x < ts.traceIDUpperBound {
+		return SamplingResult{
+			Decision:   RecordAndSample,
+			Tracestate: psc.TraceState(),
+		}
+	}
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: psc.TraceState(),
+	}
+}
+
+func (ts traceIDRatioSampler) Description() string {
+	return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+	if fraction >= 1 {
+		return AlwaysSample()
+	}
+
+	if fraction <= 0 {
+		fraction = 0
+	}
+
+	return &traceIDRatioSampler{
+		traceIDUpperBound: uint64(fraction * (1 << 63)),
+		description:       fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+	}
+}
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   RecordAndSample,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOnSampler) Description() string {
+	return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+	return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOffSampler) Description() string {
+	return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+	return alwaysOffSampler{}
+}
+
+// ParentBased returns a sampler decorator which behaves differently,
+// based on the parent of the span. If the span has no parent,
+// the decorated sampler is used to make the sampling decision. If the span has
+// a parent, depending on whether the parent is remote and whether it
+// is sampled, one of the following samplers will apply:
+//   - remoteParentSampled(Sampler) (default: AlwaysOn)
+//   - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+//   - localParentSampled(Sampler) (default: AlwaysOn)
+//   - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+	return parentBased{
+		root:   root,
+		config: configureSamplersForParentBased(samplers),
+	}
+}
+
+type parentBased struct {
+	root   Sampler
+	config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+	c := samplerConfig{
+		remoteParentSampled:    AlwaysSample(),
+		remoteParentNotSampled: NeverSample(),
+		localParentSampled:     AlwaysSample(),
+		localParentNotSampled:  NeverSample(),
+	}
+
+	for _, so := range samplers {
+		c = so.apply(c)
+	}
+
+	return c
+}
+
+// samplerConfig is a group of options for parentBased sampler.
+type samplerConfig struct {
+	remoteParentSampled, remoteParentNotSampled Sampler
+	localParentSampled, localParentNotSampled   Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+	apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentSampled = o.s
+	return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentNotSampled = o.s
+	return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+	s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentSampled = o.s
+	return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentNotSampled = o.s
+	return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	if psc.IsValid() {
+		if psc.IsRemote() {
+			if psc.IsSampled() {
+				return pb.config.remoteParentSampled.ShouldSample(p)
+			}
+			return pb.config.remoteParentNotSampled.ShouldSample(p)
+		}
+
+		if psc.IsSampled() {
+			return pb.config.localParentSampled.ShouldSample(p)
+		}
+		return pb.config.localParentNotSampled.ShouldSample(p)
+	}
+	return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+	return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+		"remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+		pb.root.Description(),
+		pb.config.remoteParentSampled.Description(),
+		pb.config.remoteParentNotSampled.Description(),
+		pb.config.localParentSampled.Description(),
+		pb.config.localParentNotSampled.Description(),
+	)
+}
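
A short sketch composing the samplers defined above: sample 10% of new root traces while honoring remote parent decisions (the ratio and option choices are illustrative):

```go
package main

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	sampler := sdktrace.ParentBased(
		sdktrace.TraceIDRatioBased(0.10),
		sdktrace.WithRemoteParentSampled(sdktrace.AlwaysSample()),
		sdktrace.WithRemoteParentNotSampled(sdktrace.NeverSample()),
	)

	// Prints the composed description built by parentBased.Description.
	fmt.Println(sampler.Description())

	_ = sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
}
```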
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 0000000..664e13e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,121 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a SpanExporter immediately.
+type simpleSpanProcessor struct {
+	exporterMu sync.Mutex
+	exporter   SpanExporter
+	stopOnce   sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and have a high computation
+// resource usage overhead. The BatchSpanProcessor is recommended for production
+// use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+	ssp := &simpleSpanProcessor{
+		exporter: exporter,
+	}
+	global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+	return ssp
+}
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+	ssp.exporterMu.Lock()
+	defer ssp.exporterMu.Unlock()
+
+	if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+		if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error, 1)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+		// A closure is used to keep reference to the exporter and then the
+		// field is zeroed. This ensures the simpleSpanProcessor is shut down
+		// before the exporter. This order is important as it avoids a potential
+		// deadlock. If the exporter shut down operation generates a span, that
+		// span would need to be exported. Meaning, OnEnd would be called and
+		// try acquiring the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down and
+			// the context to be done simultaneously. In that case this outer
+			// select statement will randomly choose a case. This will result in
+			// a different returned error for similar scenarios. Instead, double
+			// check if the exporter shut down at the same time and return that
+			// error if so. This will ensure consistency as well as ensure
+			// the caller knows the exporter shut down successfully (they can
+			// already determine if the deadline is expired given they passed
+			// the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
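
As the warning in NewSimpleSpanProcessor above notes, the simple processor is mainly useful for tests and examples; a minimal sketch using WithSyncer (the stdouttrace exporter is an assumed stand-in):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		panic(err)
	}

	// WithSyncer wraps the exporter in a SimpleSpanProcessor: every sampled
	// span is exported synchronously on End, which is convenient in tests
	// but too slow for production traffic.
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example-test").Start(context.Background(), "unit-of-work")
	span.End() // exported immediately
}
```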
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000..d511d0f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+	name                  string
+	spanContext           trace.SpanContext
+	parent                trace.SpanContext
+	spanKind              trace.SpanKind
+	startTime             time.Time
+	endTime               time.Time
+	attributes            []attribute.KeyValue
+	events                []Event
+	links                 []Link
+	status                Status
+	childSpanCount        int
+	droppedAttributeCount int
+	droppedEventCount     int
+	droppedLinkCount      int
+	resource              *resource.Resource
+	instrumentationScope  instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+	return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+	return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+	return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+	return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+	return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+	return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+	return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+	return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+	return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+	return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+	return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+	return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+	return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+	return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+	return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 0000000..1785a4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,937 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	rt "runtime/trace"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+	// Name returns the name of the span.
+	Name() string
+	// SpanContext returns the unique SpanContext that identifies the span.
+	SpanContext() trace.SpanContext
+	// Parent returns the unique SpanContext that identifies the parent of the
+	// span if one exists. If the span has no parent the returned SpanContext
+	// will be invalid.
+	Parent() trace.SpanContext
+	// SpanKind returns the role the span plays in a Trace.
+	SpanKind() trace.SpanKind
+	// StartTime returns the time the span started recording.
+	StartTime() time.Time
+	// EndTime returns the time the span stopped recording. It will be zero if
+	// the span has not ended.
+	EndTime() time.Time
+	// Attributes returns the defining attributes of the span.
+	// The order of the returned attributes is not guaranteed to be stable across invocations.
+	Attributes() []attribute.KeyValue
+	// Links returns all the links the span has to other spans.
+	Links() []Link
+	// Events returns all the events that occurred within the span's lifetime.
+	Events() []Event
+	// Status returns the span's status.
+	Status() Status
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the span.
+	InstrumentationScope() instrumentation.Scope
+	// InstrumentationLibrary returns information about the instrumentation
+	// library that created the span.
+	// Deprecated: please use InstrumentationScope instead.
+	InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	// Resource returns information about the entity that produced the span.
+	Resource() *resource.Resource
+	// DroppedAttributes returns the number of attributes dropped by the span
+	// due to limits being reached.
+	DroppedAttributes() int
+	// DroppedLinks returns the number of links dropped by the span due to
+	// limits being reached.
+	DroppedLinks() int
+	// DroppedEvents returns the number of events dropped by the span due to
+	// limits being reached.
+	DroppedEvents() int
+	// ChildSpanCount returns the count of spans that consider the span a
+	// direct parent.
+	ChildSpanCount() int
+
+	// A private method to prevent users from implementing the
+	// interface so that future additions to it will not
+	// violate compatibility.
+	private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+	trace.Span
+	ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+	embedded.Span
+
+	// mu protects the contents of this span.
+	mu sync.Mutex
+
+	// parent holds the parent span of this span as a trace.SpanContext.
+	parent trace.SpanContext
+
+	// spanKind represents the kind of this span as a trace.SpanKind.
+	spanKind trace.SpanKind
+
+	// name is the name of this span.
+	name string
+
+	// startTime is the time at which this span was started.
+	startTime time.Time
+
+	// endTime is the time at which this span was ended. It contains the zero
+	// value of time.Time until the span is ended.
+	endTime time.Time
+
+	// status is the status of this span.
+	status Status
+
+	// childSpanCount holds the number of child spans created for this span.
+	childSpanCount int
+
+	// spanContext holds the SpanContext of this span.
+	spanContext trace.SpanContext
+
+	// attributes is a collection of user-provided key/values. The collection
+	// is constrained by a configurable maximum held by the parent
+	// TracerProvider. When additional attributes are added after this maximum
+	// is reached, the attributes the user is attempting to add are dropped.
+	// The number of dropped attributes is tracked and reported in the
+	// ReadOnlySpan exported when the span ends.
+	attributes        []attribute.KeyValue
+	droppedAttributes int
+	logDropAttrsOnce  sync.Once
+
+	// events are stored in FIFO queue capped by configured limit.
+	events evictedQueue[Event]
+
+	// links are stored in FIFO queue capped by configured limit.
+	links evictedQueue[Link]
+
+	// executionTracerTaskEnd ends the execution tracer span.
+	executionTracerTaskEnd func()
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+}
+
+var (
+	_ ReadWriteSpan = (*recordingSpan)(nil)
+	_ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+	if s == nil {
+		return false
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.isRecording()
+}
+
+// isRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) isRecording() bool {
+	if s == nil {
+		return false
+	}
+	return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	if s.status.Code > code {
+		return
+	}
+
+	status := Status{Code: code}
+	if code == codes.Error {
+		status.Description = description
+	}
+
+	s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+	if s == nil || len(attributes) == 0 {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
+	limit := s.tracer.provider.spanLimits.AttributeCountLimit
+	if limit == 0 {
+		// No attributes allowed.
+		s.addDroppedAttr(len(attributes))
+		return
+	}
+
+	// If adding these attributes could exceed the capacity of s, perform
+	// de-duplication and truncation while adding to avoid over-allocation.
+	if limit > 0 && len(s.attributes)+len(attributes) > limit {
+		s.addOverCapAttrs(limit, attributes)
+		return
+	}
+
+	// Otherwise, add without deduplication. When attributes are read they
+	// will be deduplicated, optimizing the operation.
+	s.attributes = slices.Grow(s.attributes, len(attributes))
+	for _, a := range attributes {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+		s.attributes = append(s.attributes, a)
+	}
+}
+
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+	global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+	s.droppedAttributes += incr
+	s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+	// In order to not allocate more capacity to s.attributes than needed,
+	// prune and truncate this addition of attributes while adding.
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
+
+	// Now that s.attributes is deduplicated, adding unique attributes up to
+	// the capacity of s will not over allocate s.attributes.
+
+	// max size = limit
+	maxCap := min(len(attrs)+len(s.attributes), limit)
+	if cap(s.attributes) < maxCap {
+		s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
+	}
+	for _, a := range attrs {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+
+		if idx, ok := exists[a.Key]; ok {
+			// Perform all updates before dropping, even when at capacity.
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+			s.attributes[idx] = a
+			continue
+		}
+
+		if len(s.attributes) >= limit {
+			// Do not just drop all of the remaining attributes, make sure
+			// updates are checked and performed.
+			s.addDroppedAttr(1)
+		} else {
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+			s.attributes = append(s.attributes, a)
+			exists[a.Key] = len(s.attributes) - 1
+		}
+	}
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+	if limit < 0 {
+		return attr
+	}
+	switch attr.Value.Type() {
+	case attribute.STRING:
+		v := attr.Value.AsString()
+		return attr.Key.String(truncate(limit, v))
+	case attribute.STRINGSLICE:
+		v := attr.Value.AsStringSlice()
+		for i := range v {
+			v[i] = truncate(limit, v[i])
+		}
+		return attr.Key.StringSlice(v)
+	}
+	return attr
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of valid characters.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is
+// returned unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	//  - Short values less than the default limit (128).
+	//  - Strings with valid encodings that exceed the limit.
+	//  - No limit.
+	//  - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
+	}
+
+	// Optimistically, assume all valid UTF-8.
+	var b strings.Builder
+	count := 0
+	for i, c := range s {
+		if c != utf8.RuneError {
+			count++
+			if count > limit {
+				return s[:i]
+			}
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// Invalid encoding.
+			b.Grow(len(s) - 1)
+			_, _ = b.WriteString(s[:i])
+			s = s[i:]
+			break
+		}
+	}
+
+	// Fast-path, no invalid input.
+	if b.Cap() == 0 {
+		return s
+	}
+
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above, this is a RuneError.
+			i++
+			continue
+		}
+
+		_, _ = b.WriteString(s[i : i+size])
+		i += size
+		count++
+	}
+
+	return b.String()
+}
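+
+// truncateExampleSketch is an illustrative sketch, not part of the upstream
+// API. It demonstrates the documented behavior of truncate: valid characters
+// are kept up to the limit, characters (not bytes) are counted, and a
+// negative limit disables truncation entirely.
+func truncateExampleSketch() []string {
+	return []string{
+		truncate(4, "abcdef"),    // "abcd": truncated to 4 characters
+		truncate(4, "résumé"),    // "résu": runes, not bytes, are counted
+		truncate(-1, "no limit"), // returned unchanged
+	}
+}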
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanEndOptions currently supported are [trace.WithTimestamp] and
+// [trace.WithStackTrace].
+//
+// If this method is called while panicking an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+	// Do not start by checking if the span is being recorded which requires
+	// acquiring a lock. Make a minimal check that the span is not nil.
+	if s == nil {
+		return
+	}
+
+	// Store the end time as soon as possible to avoid artificially increasing
+	// the span's duration in case some operation below takes a while.
+	et := monotonicEndTime(s.startTime)
+
+	// Lock the span now that we have an end time and see if we need to do any more processing.
+	s.mu.Lock()
+	if !s.isRecording() {
+		s.mu.Unlock()
+		return
+	}
+
+	config := trace.NewSpanEndConfig(options...)
+	if recovered := recover(); recovered != nil {
+		// Record but don't stop the panic.
+		defer panic(recovered)
+		opts := []trace.EventOption{
+			trace.WithAttributes(
+				semconv.ExceptionType(typeStr(recovered)),
+				semconv.ExceptionMessage(fmt.Sprint(recovered)),
+			),
+		}
+
+		if config.StackTrace() {
+			opts = append(opts, trace.WithAttributes(
+				semconv.ExceptionStacktrace(recordStackTrace()),
+			))
+		}
+
+		s.addEvent(semconv.ExceptionEventName, opts...)
+	}
+
+	if s.executionTracerTaskEnd != nil {
+		s.mu.Unlock()
+		s.executionTracerTaskEnd()
+		s.mu.Lock()
+	}
+
+	// Setting endTime to non-zero marks the span as ended and not recording.
+	if config.Timestamp().IsZero() {
+		s.endTime = et
+	} else {
+		s.endTime = config.Timestamp()
+	}
+	s.mu.Unlock()
+
+	sps := s.tracer.provider.getSpanProcessors()
+	if len(sps) == 0 {
+		return
+	}
+	snap := s.snapshot()
+	for _, sp := range sps {
+		sp.sp.OnEnd(snap)
+	}
+}
+
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
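+
+// monotonicEndTimeSketch is an illustrative sketch, not part of the upstream
+// API. Because monotonicEndTime derives the end time from the duration since
+// start, the reported span duration stays correct even if the wall clock is
+// adjusted while the span is running.
+func monotonicEndTimeSketch() time.Duration {
+	start := time.Now()
+	end := monotonicEndTime(start)
+	return end.Sub(start) // computed from the monotonic clock reading carried by start
+}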
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error; this method
+// does not change the Span status. If this span is not being recorded or err is nil,
+// then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
+	opts = append(opts, trace.WithAttributes(
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	))
+
+	c := trace.NewEventConfig(opts...)
+	if c.StackTrace() {
+		opts = append(opts, trace.WithAttributes(
+			semconv.ExceptionStacktrace(recordStackTrace()),
+		))
+	}
+
+	s.addEvent(semconv.ExceptionEventName, opts...)
+}
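+
+// recordErrorSketch is an illustrative sketch, not part of the upstream API.
+// As documented above, RecordError only records an exception event; the span
+// status must be set separately for the span to be classified as an error.
+func recordErrorSketch(span trace.Span, err error) {
+	if err == nil {
+		return
+	}
+	span.RecordError(err, trace.WithStackTrace(true))
+	span.SetStatus(codes.Error, err.Error())
+}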
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+// addEvent adds an event with the provided name and options.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.events.add(e)
+}
+
+// SetName sets the name of this span. If this span is not being recorded, then
+// this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dedupeAttrs()
+	return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
+	// Use the fact that slices share the same backing array.
+	unique := s.attributes[:0]
+	for _, a := range s.attributes {
+		if idx, ok := record[a.Key]; ok {
+			unique[idx] = a
+		} else {
+			unique = append(unique, a)
+			record[a.Key] = len(unique) - 1
+		}
+	}
+	clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
+	s.attributes = unique
+}
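+
+// dedupeSketch is an illustrative sketch, not part of the upstream API. It
+// shows the de-duplication behavior described above: for a repeated key the
+// value written last wins and the attribute slice is compacted in place.
+func dedupeSketch() []attribute.KeyValue {
+	s := &recordingSpan{attributes: []attribute.KeyValue{
+		attribute.String("k", "first"),
+		attribute.Int("n", 1),
+		attribute.String("k", "second"),
+	}}
+	s.mu.Lock()
+	s.dedupeAttrs()
+	s.mu.Unlock()
+	return s.attributes // [k="second", n=1]
+}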
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.links.queue) == 0 {
+		return []Link{}
+	}
+	return s.links.copy()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.events.queue) == 0 {
+		return []Event{}
+	}
+	return s.events.copy()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) AddLink(link trace.Link) {
+	if s == nil {
+		return
+	}
+	if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
+		link.SpanContext.TraceState().Len() == 0 {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
+	l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		l.DroppedAttributeCount = len(l.Attributes)
+		l.Attributes = nil
+	} else if limit > 0 && len(l.Attributes) > limit {
+		l.DroppedAttributeCount = len(l.Attributes) - limit
+		l.Attributes = l.Attributes[:limit]
+	}
+
+	s.links.add(l)
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+	return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+	var sd snapshot
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	sd.endTime = s.endTime
+	sd.instrumentationScope = s.tracer.instrumentationScope
+	sd.name = s.name
+	sd.parent = s.parent
+	sd.resource = s.tracer.provider.resource
+	sd.spanContext = s.spanContext
+	sd.spanKind = s.spanKind
+	sd.startTime = s.startTime
+	sd.status = s.status
+	sd.childSpanCount = s.childSpanCount
+
+	if len(s.attributes) > 0 {
+		s.dedupeAttrs()
+		sd.attributes = s.attributes
+	}
+	sd.droppedAttributeCount = s.droppedAttributes
+	if len(s.events.queue) > 0 {
+		sd.events = s.events.copy()
+		sd.droppedEventCount = s.events.droppedCount
+	}
+	if len(s.links.queue) > 0 {
+		sd.links = s.links.copy()
+		sd.droppedLinkCount = s.links.droppedCount
+	}
+	return &sd
+}
+
+func (s *recordingSpan) addChild() {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	s.childSpanCount++
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+	if !rt.IsEnabled() {
+		// Avoid additional overhead if runtime/trace is not enabled.
+		return ctx
+	}
+	nctx, task := rt.NewTask(ctx, s.name)
+
+	s.mu.Lock()
+	s.executionTracerTaskEnd = task.End
+	s.mu.Unlock()
+
+	return nctx
+}
+
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+	embedded.Span
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+	sc     trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+	return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+	return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+	// Code is an identifier of a Spans state classification.
+	Code codes.Code
+	// Description is a user hint about why that status was set. It is only
+	// applicable when Code is Error.
+	Description string
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 0000000..6bdda3d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ExportSpans exports a batch of spans.
+	//
+	// This function is called synchronously, so there is no concurrency
+	// safety requirement. However, due to the synchronous calling pattern,
+	// it is critical that all timeouts and cancellations contained in the
+	// passed context must be honored.
+	//
+	// Any retry logic must be contained in this function. The SDK that
+	// calls this function will not implement any retry logic. All errors
+	// returned by this function are considered unrecoverable and will be
+	// reported to a configured error Handler.
+	ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown notifies the exporter of a pending halt to operations. The
+	// exporter is expected to perform any cleanup or synchronization it
+	// requires while honoring all timeouts and cancellations contained in
+	// the passed context.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
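+
+// noopExporterSketch is an illustrative sketch, not part of the upstream API.
+// It shows the minimal contract a SpanExporter must satisfy: honor the
+// timeouts and cancellations carried by the passed context and perform any
+// cleanup in Shutdown. A real exporter would marshal and deliver the spans in
+// ExportSpans, including whatever retry logic it needs.
+type noopExporterSketch struct{}
+
+var _ SpanExporter = noopExporterSketch{}
+
+// ExportSpans discards the batch, reporting only whether ctx was cancelled.
+func (noopExporterSketch) ExportSpans(ctx context.Context, _ []ReadOnlySpan) error {
+	return ctx.Err()
+}
+
+// Shutdown has no resources to release; it still respects ctx cancellation.
+func (noopExporterSketch) Shutdown(ctx context.Context) error {
+	return ctx.Err()
+}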
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 0000000..bec5e20
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+	// DefaultAttributeValueLengthLimit is the default maximum allowed
+	// attribute value length, unlimited.
+	DefaultAttributeValueLengthLimit = -1
+
+	// DefaultAttributeCountLimit is the default maximum number of attributes
+	// a span can have.
+	DefaultAttributeCountLimit = 128
+
+	// DefaultEventCountLimit is the default maximum number of events a span
+	// can have.
+	DefaultEventCountLimit = 128
+
+	// DefaultLinkCountLimit is the default maximum number of links a span can
+	// have.
+	DefaultLinkCountLimit = 128
+
+	// DefaultAttributePerEventCountLimit is the default maximum number of
+	// attributes a span event can have.
+	DefaultAttributePerEventCountLimit = 128
+
+	// DefaultAttributePerLinkCountLimit is the default maximum number of
+	// attributes a span link can have.
+	DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+	// AttributeValueLengthLimit is the maximum allowed attribute value length.
+	//
+	// This limit only applies to string and string slice attribute values.
+	// Any string longer than this value will be truncated to this length.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeValueLengthLimit int
+
+	// AttributeCountLimit is the maximum allowed span attribute count. Any
+	// attribute added to a span once this limit is reached will be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count. Any event
+	// added to a span once this limit is reached will still be added, but
+	// the oldest event will be dropped.
+	//
+	// Setting this to zero means no events will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count. Any link added
+	// to a span once this limit is reached will still be added, but the
+	// oldest link will be dropped.
+	//
+	// Setting this to zero means no links will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum number of attributes allowed
+	// per span event. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for events.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum number of attributes allowed
+	// per span link. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for links.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+	return SpanLimits{
+		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
+		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
+		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
+		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+	}
+}
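+
+// spanLimitsSketch is an illustrative sketch, not part of the upstream API.
+// It starts from the environment-aware defaults returned by NewSpanLimits and
+// tightens two limits; negative values mean "no limit" and zero means
+// "record nothing" for the corresponding data. Such a value would typically
+// be passed to NewTracerProvider via WithRawSpanLimits.
+func spanLimitsSketch() SpanLimits {
+	limits := NewSpanLimits()
+	limits.AttributeValueLengthLimit = 256 // truncate string values longer than 256 characters
+	limits.AttributeCountLimit = 64        // drop attributes added after the 64th
+	return limits
+}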
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 0000000..af7f917
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnStart is called when a span is started. It is called synchronously
+	// and should not block.
+	OnStart(parent context.Context, s ReadWriteSpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEnd is called when a span is finished. It is called synchronously
+	// and hence should not block.
+	OnEnd(s ReadOnlySpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnStart, OnEnd, or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored, this
+	// should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports to the configured Exporter all ended spans that have not
+	// yet been exported. It should only be called when absolutely necessary, such as when
+	// using a FaaS provider that may suspend the process after an invocation, but before
+	// the Processor can export the completed spans.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+	sp    SpanProcessor
+	state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+	return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
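+
+// countingProcessorSketch is an illustrative sketch, not part of the upstream
+// API. It implements SpanProcessor with the minimum expected behavior:
+// OnStart and OnEnd return quickly without blocking, and Shutdown and
+// ForceFlush honor the caller's context.
+type countingProcessorSketch struct {
+	mu             sync.Mutex
+	started, ended int
+}
+
+var _ SpanProcessor = (*countingProcessorSketch)(nil)
+
+func (p *countingProcessorSketch) OnStart(context.Context, ReadWriteSpan) {
+	p.mu.Lock()
+	p.started++
+	p.mu.Unlock()
+}
+
+func (p *countingProcessorSketch) OnEnd(ReadOnlySpan) {
+	p.mu.Lock()
+	p.ended++
+	p.mu.Unlock()
+}
+
+// Shutdown and ForceFlush have nothing buffered to flush; they only report
+// whether the caller's context was already cancelled.
+func (p *countingProcessorSketch) Shutdown(ctx context.Context) error { return ctx.Err() }
+
+func (p *countingProcessorSketch) ForceFlush(ctx context.Context) error { return ctx.Err() }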
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 0000000..0b65ae9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,162 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+	embedded.Tracer
+
+	provider             *TracerProvider
+	instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(
+	ctx context.Context,
+	name string,
+	options ...trace.SpanStartOption,
+) (context.Context, trace.Span) {
+	config := trace.NewSpanStartConfig(options...)
+
+	if ctx == nil {
+		// Prevent trace.ContextWithSpan from panicking.
+		ctx = context.Background()
+	}
+
+	// For local spans created by this SDK, track child span count.
+	if p := trace.SpanFromContext(ctx); p != nil {
+		if sdkSpan, ok := p.(*recordingSpan); ok {
+			sdkSpan.addChild()
+		}
+	}
+
+	s := tr.newSpan(ctx, name, &config)
+	if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+		sps := tr.provider.getSpanProcessors()
+		for _, sp := range sps {
+			sp.sp.OnStart(ctx, rw)
+		}
+	}
+	if rtt, ok := s.(runtimeTracer); ok {
+		ctx = rtt.runtimeTrace(ctx)
+	}
+
+	return trace.ContextWithSpan(ctx, s), s
+}
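+
+// tracerStartSketch is an illustrative sketch, not part of the upstream API.
+// It shows the flow documented above: Start returns a context carrying the
+// new span, spans started from that context are parented to it, and End must
+// be called so registered SpanProcessors observe the completed span.
+func tracerStartSketch(ctx context.Context) {
+	tp := NewTracerProvider() // no exporter registered; spans are sampled but go nowhere
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+
+	ctx, span := tp.Tracer("sketch").Start(ctx, "operation")
+	defer span.End()
+
+	_, child := tp.Tracer("sketch").Start(ctx, "child-operation")
+	child.End()
+}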
+
+type runtimeTracer interface {
+	// runtimeTrace starts a "runtime/trace".Task for the span and
+	// returns a context containing the task.
+	runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+	// If told explicitly to make this a new root use a zero value SpanContext
+	// as a parent which contains an invalid trace ID and is not remote.
+	var psc trace.SpanContext
+	if config.NewRoot() {
+		ctx = trace.ContextWithSpanContext(ctx, psc)
+	} else {
+		psc = trace.SpanContextFromContext(ctx)
+	}
+
+	// If there is a valid parent trace ID, use it to ensure the continuity of
+	// the trace. Always generate a new span ID so other components can rely
+	// on a unique span ID, even if the Span is non-recording.
+	var tid trace.TraceID
+	var sid trace.SpanID
+	if !psc.TraceID().IsValid() {
+		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+	} else {
+		tid = psc.TraceID()
+		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+	}
+
+	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+		ParentContext: ctx,
+		TraceID:       tid,
+		Name:          name,
+		Kind:          config.SpanKind(),
+		Attributes:    config.Attributes(),
+		Links:         config.Links(),
+	})
+
+	scc := trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceState: samplingResult.Tracestate,
+	}
+	if isSampled(samplingResult) {
+		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+	} else {
+		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+	}
+	sc := trace.NewSpanContext(scc)
+
+	if !isRecording(samplingResult) {
+		return tr.newNonRecordingSpan(sc)
+	}
+	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(
+	psc, sc trace.SpanContext,
+	name string,
+	sr SamplingResult,
+	config *trace.SpanConfig,
+) *recordingSpan {
+	startTime := config.Timestamp()
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	s := &recordingSpan{
+		// Do not pre-allocate the attributes slice here! Doing so will
+		// allocate memory that is likely never going to be used, or if used,
+		// will be over-sized. The default Go compiler has been tested to
+		// dynamically allocate needed space very well. Benchmarking has shown
+		// it to be more performant than what we can predetermine here,
+		// especially for the common use case of few to no added
+		// attributes.
+
+		parent:      psc,
+		spanContext: sc,
+		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
+		name:        name,
+		startTime:   startTime,
+		events:      newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+		links:       newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
+		tracer:      tr,
+	}
+
+	for _, l := range config.Links() {
+		s.AddLink(l)
+	}
+
+	s.SetAttributes(sr.Attributes...)
+	s.SetAttributes(config.Attributes()...)
+
+	return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+	return nonRecordingSpan{tracer: tr, sc: sc}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 0000000..b84dd2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+	return "1.16.0-rc.1"
+}