[VOL-5486] Fix deprecated versions

Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
index dd878a3..b9cc55a 100644
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -16,8 +16,3 @@
 http://github.com/golang/protobuf/
 Copyright 2010 The Go Authors
 See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
new file mode 100644
index 0000000..65d761b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 0000000..8547c8d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+	Value string
+	Q     float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+	for _, s := range header[key] {
+		for {
+			var spec AcceptSpec
+			spec.Value, s = expectTokenSlash(s)
+			if spec.Value == "" {
+				continue loop
+			}
+			spec.Q = 1.0
+			s = skipSpace(s)
+			if strings.HasPrefix(s, ";") {
+				s = skipSpace(s[1:])
+				if !strings.HasPrefix(s, "q=") {
+					continue loop
+				}
+				spec.Q, s = expectQuality(s[2:])
+				if spec.Q < 0.0 {
+					continue loop
+				}
+			}
+			specs = append(specs, spec)
+			s = skipSpace(s)
+			if !strings.HasPrefix(s, ",") {
+				continue loop
+			}
+			s = skipSpace(s[1:])
+		}
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		b := s[i]
+		if (octetTypes[b]&isToken == 0) && b != '/' {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+	switch {
+	case len(s) == 0:
+		return -1, ""
+	case s[0] == '0':
+		q = 0
+	case s[0] == '1':
+		q = 1
+	default:
+		return -1, ""
+	}
+	s = s[1:]
+	if !strings.HasPrefix(s, ".") {
+		return q, s
+	}
+	s = s[1:]
+	i := 0
+	n := 0
+	d := 1
+	for ; i < len(s); i++ {
+		b := s[i]
+		if b < '0' || b > '9' {
+			break
+		}
+		n = n*10 + int(b) - '0'
+		d *= 10
+	}
+	return q + float64(n)/float64(d), s[i:]
+}
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 0000000..2e45780
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+	"net/http"
+
+	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+	bestOffer := "identity"
+	bestQ := -1.0
+	specs := header.ParseAccept(r.Header, "Accept-Encoding")
+	for _, offer := range offers {
+		for _, spec := range specs {
+			if spec.Q > bestQ &&
+				(spec.Value == "*" || spec.Value == offer) {
+				bestQ = spec.Q
+				bestOffer = offer
+			}
+		}
+	}
+	if bestQ == 0 {
+		bestOffer = ""
+	}
+	return bestOffer
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
index 44986bf..c67ff1b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -1 +1 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 0000000..450189f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+	path, version, sum := "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
+	c := &selfCollector{MustNewConstMetric(
+		NewDesc(
+			"go_build_info",
+			"Build information about the main Go module.",
+			nil, Labels{"path": path, "version": version, "checksum": sum},
+		),
+		GaugeValue, 1)}
+	c.init(c.self)
+	return c
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 1e83965..cf05079 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -69,9 +69,9 @@
 // If a Collector collects the same metrics throughout its lifetime, its
 // Describe method can simply be implemented as:
 //
-//   func (c customCollector) Describe(ch chan<- *Desc) {
-//   	DescribeByCollect(c, ch)
-//   }
+//	func (c customCollector) Describe(ch chan<- *Desc) {
+//		DescribeByCollect(c, ch)
+//	}
 //
 // However, this will not work if the metrics collected change dynamically over
 // the lifetime of the Collector in a way that their combined set of descriptors
@@ -118,3 +118,11 @@
 func (c *selfCollector) Collect(ch chan<- Metric) {
 	ch <- c.self
 }
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+	Metric
+	Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
new file mode 100644
index 0000000..9a71a15
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// CollectorFunc is a convenient way to implement a Prometheus Collector
+// without interface boilerplate.
+// This implementation is based on the DescribeByCollect method;
+// familiarize yourself with it before use.
+type CollectorFunc func(chan<- Metric)
+
+// Collect calls the defined CollectorFunc function with the provided Metrics channel
+func (f CollectorFunc) Collect(ch chan<- Metric) {
+	f(ch)
+}
+
+// Describe sends the descriptor information using DescribeByCollect
+func (f CollectorFunc) Describe(ch chan<- *Desc) {
+	DescribeByCollect(f, ch)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 3f8fd79..4ce84e7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -20,6 +20,7 @@
 	"time"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // Counter is a Metric that represents a single numerical value that only ever
@@ -51,7 +52,7 @@
 // will lead to a valid (label-less) exemplar. But if Labels is nil, the current
 // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
 // of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
+// than 128 runes in total.
 type ExemplarAdder interface {
 	AddWithExemplar(value float64, exemplar Labels)
 }
@@ -59,6 +60,18 @@
 // CounterOpts is an alias for Opts. See there for doc comments.
 type CounterOpts Opts
 
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left at its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewCounter creates a new Counter based on the provided CounterOpts.
 //
 // The returned implementation also implements ExemplarAdder. It is safe to
@@ -78,8 +91,12 @@
 		nil,
 		opts.ConstLabels,
 	)
-	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
 	result.init(result) // Init self-collection.
+	result.createdTs = timestamppb.New(opts.now())
 	return result
 }
 
@@ -94,10 +111,12 @@
 	selfCollector
 	desc *Desc
 
+	createdTs  *timestamppb.Timestamp
 	labelPairs []*dto.LabelPair
 	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
 
-	now func() time.Time // To mock out time.Now() for testing.
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
 }
 
 func (c *counter) Desc() *Desc {
@@ -133,17 +152,21 @@
 	atomic.AddUint64(&c.valInt, 1)
 }
 
-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
 	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
 	ival := atomic.LoadUint64(&c.valInt)
-	val := fval + float64(ival)
+	return fval + float64(ival)
+}
 
+func (c *counter) Write(out *dto.Metric) error {
+	// Read the Exemplar first and the value second. This is to avoid a race condition
+	// where users see an exemplar for a not-yet-existing observation.
 	var exemplar *dto.Exemplar
 	if e := c.exemplar.Load(); e != nil {
 		exemplar = e.(*dto.Exemplar)
 	}
-
-	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+	val := c.get()
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
 }
 
 func (c *counter) updateExemplar(v float64, l Labels) {
@@ -169,19 +192,31 @@
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
 // partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
-	desc := NewDesc(
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
 	return &CounterVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
 			}
-			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
 			result.init(result) // Init self-collection.
+			result.createdTs = timestamppb.New(opts.now())
 			return result
 		}),
 	}
@@ -241,7 +276,8 @@
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
 func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
 	c, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -252,7 +288,8 @@
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
 func (v *CounterVec) With(labels Labels) Counter {
 	c, err := v.GetMetricWith(labels)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 4bb816a..2331b8b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -14,17 +14,16 @@
 package prometheus
 
 import (
-	"errors"
 	"fmt"
 	"sort"
 	"strings"
 
 	"github.com/cespare/xxhash/v2"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"
-
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
 )
 
 // Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -51,9 +50,9 @@
 	// constLabelPairs contains precalculated DTO label pairs based on
 	// the constant labels.
 	constLabelPairs []*dto.LabelPair
-	// variableLabels contains names of labels for which the metric
-	// maintains variable values.
-	variableLabels []string
+	// variableLabels contains names of labels and normalization function for
+	// which the metric maintains variable values.
+	variableLabels *compiledLabels
 	// id is a hash of the values of the ConstLabels and fqName. This
 	// must be unique among all registered descriptors and can therefore be
 	// used as an identifier of the descriptor.
@@ -77,12 +76,27 @@
 // For constLabels, the label values are constant. Therefore, they are fully
 // specified in the Desc. See the Collector example for a usage pattern.
 func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
 	d := &Desc{
 		fqName:         fqName,
 		help:           help,
-		variableLabels: variableLabels,
+		variableLabels: variableLabels.compile(),
 	}
-	if !model.IsValidMetricName(model.LabelValue(fqName)) {
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	if !model.NameValidationScheme.IsValidMetricName(fqName) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
 		return d
 	}
@@ -90,7 +104,7 @@
 	// their sorted label names) plus the fqName (at position 0).
 	labelValues := make([]string, 1, len(constLabels)+1)
 	labelValues[0] = fqName
-	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
 	labelNameSet := map[string]struct{}{}
 	// First add only the const label names and sort them...
 	for labelName := range constLabels {
@@ -115,16 +129,16 @@
 	// Now add the variable label names, but prefix them with something that
 	// cannot be in a regular label name. That prevents matching the label
 	// dimension with a different mix between preset and variable labels.
-	for _, labelName := range variableLabels {
-		if !checkLabelName(labelName) {
-			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+	for _, label := range d.variableLabels.names {
+		if !checkLabelName(label) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
 			return d
 		}
-		labelNames = append(labelNames, "$"+labelName)
-		labelNameSet[labelName] = struct{}{}
+		labelNames = append(labelNames, "$"+label)
+		labelNameSet[label] = struct{}{}
 	}
 	if len(labelNames) != len(labelNameSet) {
-		d.err = errors.New("duplicate label names")
+		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
 		return d
 	}
 
@@ -154,7 +168,7 @@
 			Value: proto.String(v),
 		})
 	}
-	sort.Sort(labelPairSorter(d.constLabelPairs))
+	sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
 	return d
 }
 
@@ -176,11 +190,22 @@
 			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
 		)
 	}
+	vlStrings := []string{}
+	if d.variableLabels != nil {
+		vlStrings = make([]string, 0, len(d.variableLabels.names))
+		for _, vl := range d.variableLabels.names {
+			if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+				vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+			} else {
+				vlStrings = append(vlStrings, vl)
+			}
+		}
+	}
 	return fmt.Sprintf(
-		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
 		d.fqName,
 		d.help,
 		strings.Join(lpStrings, ","),
-		d.variableLabels,
+		strings.Join(vlStrings, ","),
 	)
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 9845012..962608f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -21,55 +21,66 @@
 // All exported functions and methods are safe to be used concurrently unless
 // specified otherwise.
 //
-// A Basic Example
+// # A Basic Example
 //
 // As a starting point, a very basic usage example:
 //
-//    package main
+//	package main
 //
-//    import (
-//    	"log"
-//    	"net/http"
+//	import (
+//		"log"
+//		"net/http"
 //
-//    	"github.com/prometheus/client_golang/prometheus"
-//    	"github.com/prometheus/client_golang/prometheus/promhttp"
-//    )
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
 //
-//    var (
-//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-//    		Name: "cpu_temperature_celsius",
-//    		Help: "Current temperature of the CPU.",
-//    	})
-//    	hdFailures = prometheus.NewCounterVec(
-//    		prometheus.CounterOpts{
-//    			Name: "hd_errors_total",
-//    			Help: "Number of hard-disk errors.",
-//    		},
-//    		[]string{"device"},
-//    	)
-//    )
+//	type metrics struct {
+//		cpuTemp  prometheus.Gauge
+//		hdFailures *prometheus.CounterVec
+//	}
 //
-//    func init() {
-//    	// Metrics have to be registered to be exposed:
-//    	prometheus.MustRegister(cpuTemp)
-//    	prometheus.MustRegister(hdFailures)
-//    }
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
+//		m := &metrics{
+//			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+//				Name: "cpu_temperature_celsius",
+//				Help: "Current temperature of the CPU.",
+//			}),
+//			hdFailures: prometheus.NewCounterVec(
+//				prometheus.CounterOpts{
+//					Name: "hd_errors_total",
+//					Help: "Number of hard-disk errors.",
+//				},
+//				[]string{"device"},
+//			),
+//		}
+//		reg.MustRegister(m.cpuTemp)
+//		reg.MustRegister(m.hdFailures)
+//		return m
+//	}
 //
-//    func main() {
-//    	cpuTemp.Set(65.3)
-//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//	func main() {
+//		// Create a non-global registry.
+//		reg := prometheus.NewRegistry()
 //
-//    	// The Handler function provides a default handler to expose metrics
-//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
-//    	http.Handle("/metrics", promhttp.Handler())
-//    	log.Fatal(http.ListenAndServe(":8080", nil))
-//    }
+//		// Create new metrics and register them using the custom registry.
+//		m := NewMetrics(reg)
+//		// Set values for the newly created metrics.
+//		m.cpuTemp.Set(65.3)
+//		m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
 //
+//		// Expose metrics and custom registry via an HTTP server
+//		// using the HandlerFor function. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+//		log.Fatal(http.ListenAndServe(":8080", nil))
+//	}
 //
 // This is a complete program that exports two metrics, a Gauge and a Counter,
 // the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
 //
-// Metrics
+// # Metrics
 //
 // The number of exported identifiers in this package might appear a bit
 // overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
 // To create instances of Metrics and their vector versions, you need a suitable
 // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
 //
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
 //
 // While you could create your own implementations of Metric, most likely you
 // will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
 // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
 // shortcuts.
 //
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
 //
 // While MustRegister is the by far most common way of registering a Collector,
 // sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
 // NewProcessCollector). With a custom registry, you are in control and decide
 // yourself about the Collectors to register.
 //
-// HTTP Exposition
+// # HTTP Exposition
 //
 // The Registry implements the Gatherer interface. The caller of the Gather
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
 // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
 //
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
 //
 // Function for pushing to the Pushgateway can be found in the push sub-package.
 //
-// Graphite Bridge
+// # Graphite Bridge
 //
 // Functions and examples to push metrics from a Gatherer to Graphite can be
 // found in the graphite sub-package.
 //
-// Other Means of Exposition
+// # Other Means of Exposition
 //
 // More ways of exposing metrics can easily be added by following the approaches
 // of the existing implementations.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
index c41ab37..de5a856 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -48,7 +48,7 @@
 			continue
 		}
 		var v interface{}
-		labels := make([]string, len(desc.variableLabels))
+		labels := make([]string, len(desc.variableLabels.names))
 		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
 			ch <- NewInvalidMetric(desc, err)
 			continue
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index bd0733d..dd2eac9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -55,6 +55,18 @@
 // GaugeOpts is an alias for Opts. See there for doc comments.
 type GaugeOpts Opts
 
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left at its default value.
+type GaugeVecOpts struct {
+	GaugeOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewGauge creates a new Gauge based on the provided GaugeOpts.
 //
 // The returned implementation is optimized for a fast Set method. If you have a
@@ -123,7 +135,7 @@
 
 func (g *gauge) Write(out *dto.Metric) error {
 	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
-	return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
 }
 
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -138,16 +150,24 @@
 // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
 // partitioned by the given label names.
 func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
-	desc := NewDesc(
+	return V2.NewGaugeVec(GaugeVecOpts{
+		GaugeOpts:      opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &GaugeVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
 			}
 			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
 			result.init(result) // Init self-collection.
@@ -210,7 +230,8 @@
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
 func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
 	g, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -221,7 +242,8 @@
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
 func (v *GaugeVec) With(labels Labels) Gauge {
 	g, err := v.GetMetricWith(labels)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 0000000..614fd61
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+	pid := os.Getpid()
+	return func() (int, error) {
+		return pid, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
new file mode 100644
index 0000000..eaf8059
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+	return func() (int, error) {
+		return 1, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index a96ed1c..520cbd7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -16,32 +16,198 @@
 import (
 	"runtime"
 	"runtime/debug"
-	"sync"
 	"time"
 )
 
-type goCollector struct {
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
+// while the eval closures work on runtime.MemStats, on Go 1.17+ the struct is
+// populated using runtime/metrics. Those are the defaults we can't alter.
+func goRuntimeMemStats() memStatsMetrics {
+	return memStatsMetrics{
+		{
+			desc: NewDesc(
+				memstatNamespace("alloc_bytes"),
+				"Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("alloc_bytes_total"),
+				"Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("sys_bytes"),
+				"Number of bytes obtained from system. Equals to /memory/classes/total:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mallocs_total"),
+				// TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+				"Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("frees_total"),
+				"Total number of heap objects freed. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_alloc_bytes"),
+				"Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_sys_bytes"),
+				"Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_idle_bytes"),
+				"Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_inuse_bytes"),
+				"Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_released_bytes"),
+				"Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_objects"),
+				"Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("stack_inuse_bytes"),
+				"Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("stack_sys_bytes"),
+				"Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mspan_inuse_bytes"),
+				"Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mspan_sys_bytes"),
+				"Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mcache_inuse_bytes"),
+				"Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mcache_sys_bytes"),
+				"Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("buck_hash_sys_bytes"),
+				"Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("gc_sys_bytes"),
+				"Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("other_sys_bytes"),
+				"Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("next_gc_bytes"),
+				"Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+			valType: GaugeValue,
+		},
+	}
+}
+
+type baseGoCollector struct {
 	goroutinesDesc *Desc
 	threadsDesc    *Desc
 	gcDesc         *Desc
+	gcLastTimeDesc *Desc
 	goInfoDesc     *Desc
-
-	// ms... are memstats related.
-	msLast          *runtime.MemStats // Previously collected memstats.
-	msLastTimestamp time.Time
-	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
-	msMetrics       memStatsMetrics
-	msRead          func(*runtime.MemStats) // For mocking in tests.
-	msMaxWait       time.Duration           // Wait time for fresh memstats.
-	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
 }
 
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
-	return &goCollector{
+func newBaseGoCollector() baseGoCollector {
+	return baseGoCollector{
 		goroutinesDesc: NewDesc(
 			"go_goroutines",
 			"Number of goroutines that currently exist.",
@@ -52,248 +218,34 @@
 			nil, nil),
 		gcDesc: NewDesc(
 			"go_gc_duration_seconds",
-			"A summary of the pause duration of garbage collection cycles.",
+			"A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
+			nil, nil),
+		gcLastTimeDesc: NewDesc(
+			"go_memstats_last_gc_time_seconds",
+			"Number of seconds since 1970 of last garbage collection.",
 			nil, nil),
 		goInfoDesc: NewDesc(
 			"go_info",
 			"Information about the Go environment.",
 			nil, Labels{"version": runtime.Version()}),
-		msLast:    &runtime.MemStats{},
-		msRead:    runtime.ReadMemStats,
-		msMaxWait: time.Second,
-		msMaxAge:  5 * time.Minute,
-		msMetrics: memStatsMetrics{
-			{
-				desc: NewDesc(
-					memstatNamespace("alloc_bytes"),
-					"Number of bytes allocated and still in use.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("alloc_bytes_total"),
-					"Total number of bytes allocated, even if freed.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
-				valType: CounterValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("sys_bytes"),
-					"Number of bytes obtained from system.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("lookups_total"),
-					"Total number of pointer lookups.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
-				valType: CounterValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("mallocs_total"),
-					"Total number of mallocs.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
-				valType: CounterValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("frees_total"),
-					"Total number of frees.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
-				valType: CounterValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("heap_alloc_bytes"),
-					"Number of heap bytes allocated and still in use.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("heap_sys_bytes"),
-					"Number of heap bytes obtained from system.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("heap_idle_bytes"),
-					"Number of heap bytes waiting to be used.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("heap_inuse_bytes"),
-					"Number of heap bytes that are in use.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("heap_released_bytes"),
-					"Number of heap bytes released to OS.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("heap_objects"),
-					"Number of allocated objects.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("stack_inuse_bytes"),
-					"Number of bytes in use by the stack allocator.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("stack_sys_bytes"),
-					"Number of bytes obtained from system for stack allocator.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("mspan_inuse_bytes"),
-					"Number of bytes in use by mspan structures.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("mspan_sys_bytes"),
-					"Number of bytes used for mspan structures obtained from system.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("mcache_inuse_bytes"),
-					"Number of bytes in use by mcache structures.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("mcache_sys_bytes"),
-					"Number of bytes used for mcache structures obtained from system.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("buck_hash_sys_bytes"),
-					"Number of bytes used by the profiling bucket hash table.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("gc_sys_bytes"),
-					"Number of bytes used for garbage collection system metadata.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("other_sys_bytes"),
-					"Number of bytes used for other system allocations.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("next_gc_bytes"),
-					"Number of heap bytes when next garbage collection will take place.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("last_gc_time_seconds"),
-					"Number of seconds since 1970 of last garbage collection.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
-				valType: GaugeValue,
-			}, {
-				desc: NewDesc(
-					memstatNamespace("gc_cpu_fraction"),
-					"The fraction of this program's available CPU time used by the GC since the program started.",
-					nil, nil,
-				),
-				eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
-				valType: GaugeValue,
-			},
-		},
 	}
 }
 
-func memstatNamespace(s string) string {
-	return "go_memstats_" + s
-}
-
 // Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
 	ch <- c.goroutinesDesc
 	ch <- c.threadsDesc
 	ch <- c.gcDesc
+	ch <- c.gcLastTimeDesc
 	ch <- c.goInfoDesc
-	for _, i := range c.msMetrics {
-		ch <- i.desc
-	}
 }
 
 // Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
-	var (
-		ms   = &runtime.MemStats{}
-		done = make(chan struct{})
-	)
-	// Start reading memstats first as it might take a while.
-	go func() {
-		c.msRead(ms)
-		c.msMtx.Lock()
-		c.msLast = ms
-		c.msLastTimestamp = time.Now()
-		c.msMtx.Unlock()
-		close(done)
-	}()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
 	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
-	n, _ := runtime.ThreadCreateProfile(nil)
-	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+	n := getRuntimeNumThreads()
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
 
 	var stats debug.GCStats
 	stats.PauseQuantiles = make([]time.Duration, 5)
@@ -305,63 +257,18 @@
 	}
 	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
 	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
-
+	ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
 	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
-	timer := time.NewTimer(c.msMaxWait)
-	select {
-	case <-done: // Our own ReadMemStats succeeded in time. Use it.
-		timer.Stop() // Important for high collection frequencies to not pile up timers.
-		c.msCollect(ch, ms)
-		return
-	case <-timer.C: // Time out, use last memstats if possible. Continue below.
-	}
-	c.msMtx.Lock()
-	if time.Since(c.msLastTimestamp) < c.msMaxAge {
-		// Last memstats are recent enough. Collect from them under the lock.
-		c.msCollect(ch, c.msLast)
-		c.msMtx.Unlock()
-		return
-	}
-	// If we are here, the last memstats are too old or don't exist. We have
-	// to wait until our own ReadMemStats finally completes. For that to
-	// happen, we have to release the lock.
-	c.msMtx.Unlock()
-	<-done
-	c.msCollect(ch, ms)
 }
 
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
-	for _, i := range c.msMetrics {
-		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
-	}
+func memstatNamespace(s string) string {
+	return "go_memstats_" + s
 }
 
-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
 type memStatsMetrics []struct {
 	desc    *Desc
 	eval    func(*runtime.MemStats) float64
 	valType ValueType
 }
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
-	path, version, sum := "unknown", "unknown", "unknown"
-	if bi, ok := debug.ReadBuildInfo(); ok {
-		path = bi.Main.Path
-		version = bi.Main.Version
-		sum = bi.Main.Sum
-	}
-	c := &selfCollector{MustNewConstMetric(
-		NewDesc(
-			"go_build_info",
-			"Build information about the main Go module.",
-			nil, Labels{"path": path, "version": version, "checksum": sum},
-		),
-		GaugeValue, 1)}
-	c.init(c.self)
-	return c
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 0000000..897a6e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,122 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+	"runtime"
+	"sync"
+	"time"
+)
+
+type goCollector struct {
+	base baseGoCollector
+
+	// ms... are memstats related.
+	msLast          *runtime.MemStats // Previously collected memstats.
+	msLastTimestamp time.Time
+	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
+	msMetrics       memStatsMetrics
+	msRead          func(*runtime.MemStats) // For mocking in tests.
+	msMaxWait       time.Duration           // Wait time for fresh memstats.
+	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+	msMetrics := goRuntimeMemStats()
+	msMetrics = append(msMetrics, struct {
+		desc    *Desc
+		eval    func(*runtime.MemStats) float64
+		valType ValueType
+	}{
+		// This metric is omitted in Go 1.17+; see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+		desc: NewDesc(
+			memstatNamespace("gc_cpu_fraction"),
+			"The fraction of this program's available CPU time used by the GC since the program started.",
+			nil, nil,
+		),
+		eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+		valType: GaugeValue,
+	})
+	return &goCollector{
+		base:      newBaseGoCollector(),
+		msLast:    &runtime.MemStats{},
+		msRead:    runtime.ReadMemStats,
+		msMaxWait: time.Second,
+		msMaxAge:  5 * time.Minute,
+		msMetrics: msMetrics,
+	}
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	var (
+		ms   = &runtime.MemStats{}
+		done = make(chan struct{})
+	)
+	// Start reading memstats first as it might take a while.
+	go func() {
+		c.msRead(ms)
+		c.msMtx.Lock()
+		c.msLast = ms
+		c.msLastTimestamp = time.Now()
+		c.msMtx.Unlock()
+		close(done)
+	}()
+
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	timer := time.NewTimer(c.msMaxWait)
+	select {
+	case <-done: // Our own ReadMemStats succeeded in time. Use it.
+		timer.Stop() // Important for high collection frequencies to not pile up timers.
+		c.msCollect(ch, ms)
+		return
+	case <-timer.C: // Time out, use last memstats if possible. Continue below.
+	}
+	c.msMtx.Lock()
+	if time.Since(c.msLastTimestamp) < c.msMaxAge {
+		// Last memstats are recent enough. Collect from them under the lock.
+		c.msCollect(ch, c.msLast)
+		c.msMtx.Unlock()
+		return
+	}
+	// If we are here, the last memstats are too old or don't exist. We have
+	// to wait until our own ReadMemStats finally completes. For that to
+	// happen, we have to release the lock.
+	c.msMtx.Unlock()
+	<-done
+	c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+	for _, i := range c.msMetrics {
+		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+	}
+}
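The Collect method above implements a small pattern worth calling out: start a potentially slow runtime.ReadMemStats in a goroutine, wait for it up to msMaxWait, fall back to a cached copy no older than msMaxAge, and only block on the fresh read as a last resort. The following is a minimal standalone sketch of that pattern; the names memStatsWithTimeout, cachedMS and cachedTime are illustrative and not part of the package.

package main

import (
	"fmt"
	"runtime"
	"sync"
	"time"
)

var (
	mu         sync.Mutex
	cachedMS   runtime.MemStats // last successfully read stats (illustrative cache)
	cachedTime time.Time
)

// memStatsWithTimeout mirrors the collector's strategy: kick off a fresh read,
// wait up to maxWait for it, otherwise fall back to a cached copy that is not
// older than maxAge, and only as a last resort block on the fresh read.
func memStatsWithTimeout(maxWait, maxAge time.Duration) runtime.MemStats {
	fresh := &runtime.MemStats{}
	done := make(chan struct{})
	go func() {
		runtime.ReadMemStats(fresh) // may take a while under heavy GC load
		mu.Lock()
		cachedMS = *fresh
		cachedTime = time.Now()
		mu.Unlock()
		close(done)
	}()

	timer := time.NewTimer(maxWait)
	select {
	case <-done: // Fresh read finished in time.
		timer.Stop()
		return *fresh
	case <-timer.C: // Timed out; try the cache below.
	}

	mu.Lock()
	if time.Since(cachedTime) < maxAge {
		ms := cachedMS
		mu.Unlock()
		return ms
	}
	mu.Unlock() // Release the lock so the reader goroutine can store its result.
	<-done
	return *fresh
}

func main() {
	ms := memStatsWithTimeout(time.Second, 5*time.Minute)
	fmt.Println("heap alloc bytes:", ms.HeapAlloc)
}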
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
new file mode 100644
index 0000000..6b86847
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -0,0 +1,574 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"runtime/metrics"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	// constants for strings referenced more than once.
+	goGCHeapTinyAllocsObjects               = "/gc/heap/tiny/allocs:objects"
+	goGCHeapAllocsObjects                   = "/gc/heap/allocs:objects"
+	goGCHeapFreesObjects                    = "/gc/heap/frees:objects"
+	goGCHeapFreesBytes                      = "/gc/heap/frees:bytes"
+	goGCHeapAllocsBytes                     = "/gc/heap/allocs:bytes"
+	goGCHeapObjects                         = "/gc/heap/objects:objects"
+	goGCHeapGoalBytes                       = "/gc/heap/goal:bytes"
+	goMemoryClassesTotalBytes               = "/memory/classes/total:bytes"
+	goMemoryClassesHeapObjectsBytes         = "/memory/classes/heap/objects:bytes"
+	goMemoryClassesHeapUnusedBytes          = "/memory/classes/heap/unused:bytes"
+	goMemoryClassesHeapReleasedBytes        = "/memory/classes/heap/released:bytes"
+	goMemoryClassesHeapFreeBytes            = "/memory/classes/heap/free:bytes"
+	goMemoryClassesHeapStacksBytes          = "/memory/classes/heap/stacks:bytes"
+	goMemoryClassesOSStacksBytes            = "/memory/classes/os-stacks:bytes"
+	goMemoryClassesMetadataMSpanInuseBytes  = "/memory/classes/metadata/mspan/inuse:bytes"
+	goMemoryClassesMetadataMSPanFreeBytes   = "/memory/classes/metadata/mspan/free:bytes"
+	goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+	goMemoryClassesMetadataMCacheFreeBytes  = "/memory/classes/metadata/mcache/free:bytes"
+	goMemoryClassesProfilingBucketsBytes    = "/memory/classes/profiling/buckets:bytes"
+	goMemoryClassesMetadataOtherBytes       = "/memory/classes/metadata/other:bytes"
+	goMemoryClassesOtherBytes               = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the goRuntimeMemStats-like metrics.
+var rmNamesForMemStatsMetrics = []string{
+	goGCHeapTinyAllocsObjects,
+	goGCHeapAllocsObjects,
+	goGCHeapFreesObjects,
+	goGCHeapAllocsBytes,
+	goGCHeapObjects,
+	goGCHeapGoalBytes,
+	goMemoryClassesTotalBytes,
+	goMemoryClassesHeapObjectsBytes,
+	goMemoryClassesHeapUnusedBytes,
+	goMemoryClassesHeapReleasedBytes,
+	goMemoryClassesHeapFreeBytes,
+	goMemoryClassesHeapStacksBytes,
+	goMemoryClassesOSStacksBytes,
+	goMemoryClassesMetadataMSpanInuseBytes,
+	goMemoryClassesMetadataMSPanFreeBytes,
+	goMemoryClassesMetadataMCacheInuseBytes,
+	goMemoryClassesMetadataMCacheFreeBytes,
+	goMemoryClassesProfilingBucketsBytes,
+	goMemoryClassesMetadataOtherBytes,
+	goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+	ret := make([]metrics.Description, 0, len(lookup))
+	for _, rm := range metrics.All() {
+		for _, m := range lookup {
+			if m == rm.Name {
+				ret = append(ret, rm)
+			}
+		}
+	}
+	return ret
+}
+
+type goCollector struct {
+	base baseGoCollector
+
+	// mu protects updates to all fields ensuring a consistent
+	// snapshot is always produced by Collect.
+	mu sync.Mutex
+
+	// Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+	sampleBuf []metrics.Sample
+	// sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+	sampleMap map[string]*metrics.Sample
+
+	// rmExposedMetrics represents all runtime/metrics package metrics
+	// that were configured to be exposed.
+	rmExposedMetrics     []collectorMetric
+	rmExactSumMapForHist map[string]string
+
+	// With Go 1.17, the runtime/metrics package was introduced.
+	// From that point on, the names of the metrics exposed by this collector
+	// could be generated directly from runtime/metrics names. However, these
+	// differ from the old MemStats-based names for the same values.
+	//
+	// These fields exist to export the same values under the old names
+	// as well.
+	msMetrics        memStatsMetrics
+	msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+	metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+	var descs []rmMetricDesc
+	for _, d := range metrics.All() {
+		var (
+			deny = true
+			desc rmMetricDesc
+		)
+
+		for _, r := range rules {
+			if !r.Matcher.MatchString(d.Name) {
+				continue
+			}
+			deny = r.Deny
+		}
+		if deny {
+			continue
+		}
+
+		desc.Description = d
+		descs = append(descs, desc)
+	}
+	return descs
+}
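matchRuntimeMetricsRules applies the rules in order: the last rule whose matcher hits a metric name decides, and a name that matches no rule is denied. A self-contained sketch of that selection logic, using a local rule type instead of internal.GoCollectorRule (names here are illustrative only):

package main

import (
	"fmt"
	"regexp"
)

// rule mirrors the shape of the collector rules for illustration only.
type rule struct {
	matcher *regexp.Regexp
	deny    bool
}

// allowed reports whether a metric name survives the rule list: the last
// matching rule wins, and a name that matches no rule is denied.
func allowed(name string, rules []rule) bool {
	deny := true
	for _, r := range rules {
		if !r.matcher.MatchString(name) {
			continue
		}
		deny = r.deny
	}
	return !deny
}

func main() {
	rules := []rule{
		{matcher: regexp.MustCompile(`^/gc/`)},                       // allow all GC metrics
		{matcher: regexp.MustCompile(`^/gc/heap/tiny/`), deny: true}, // ...except the tiny-alloc ones
	}
	fmt.Println(allowed("/gc/heap/allocs:objects", rules))      // true
	fmt.Println(allowed("/gc/heap/tiny/allocs:objects", rules)) // false (later deny rule wins)
	fmt.Println(allowed("/sched/gomaxprocs:threads", rules))    // false (no rule matches)
}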
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+	return internal.GoCollectorOptions{
+		RuntimeMetricSumForHist: map[string]string{
+			"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+			"/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
+		},
+		RuntimeMetricRules: []internal.GoCollectorRule{
+			// Recommended metrics we want by default from runtime/metrics.
+			{Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+		},
+	}
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+	opt := defaultGoCollectorOptions()
+	for _, o := range opts {
+		o(&opt)
+	}
+
+	exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+	// Collect all histogram samples so that we can get their buckets.
+	// The API guarantees that the buckets are always fixed for the lifetime
+	// of the process.
+	var histograms []metrics.Sample
+	for _, d := range exposedDescriptions {
+		if d.Kind == metrics.KindFloat64Histogram {
+			histograms = append(histograms, metrics.Sample{Name: d.Name})
+		}
+	}
+
+	if len(histograms) > 0 {
+		metrics.Read(histograms)
+	}
+
+	bucketsMap := make(map[string][]float64)
+	for i := range histograms {
+		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+	}
+
+	// Generate a collector for each exposed runtime/metrics metric.
+	metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+	// sampleBuf is used for reading from runtime/metrics. It is preallocated
+	// for the largest possible case so that the pointers stored in sampleMap
+	// remain stable.
+	sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+	sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+	for _, d := range exposedDescriptions {
+		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+		if !ok {
+			// Just ignore this metric; we can't do anything with it here.
+			// If a user decides to use the latest version of Go, we don't want
+			// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+			continue
+		}
+		help := attachOriginalName(d.Description.Description, d.Name)
+
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+		var m collectorMetric
+		if d.Kind == metrics.KindFloat64Histogram {
+			_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+			m = newBatchHistogram(
+				NewDesc(
+					BuildFQName(namespace, subsystem, name),
+					help,
+					nil,
+					nil,
+				),
+				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+				hasSum,
+			)
+		} else if d.Cumulative {
+			m = NewCounter(CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      help,
+			},
+			)
+		} else {
+			m = NewGauge(GaugeOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      help,
+			})
+		}
+		metricSet = append(metricSet, m)
+	}
+
+	// Add exact sum metrics to sampleBuf if not added before.
+	for _, h := range histograms {
+		sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+		if !ok {
+			continue
+		}
+
+		if _, ok := sampleMap[sumMetric]; ok {
+			continue
+		}
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+		sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+	}
+
+	var (
+		msMetrics      memStatsMetrics
+		msDescriptions []metrics.Description
+	)
+
+	if !opt.DisableMemStatsLikeMetrics {
+		msMetrics = goRuntimeMemStats()
+		msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+		// Add the metric to sampleBuf only if it was not already exposed above.
+		for _, mdDesc := range msDescriptions {
+			if _, ok := sampleMap[mdDesc.Name]; ok {
+				continue
+			}
+			sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+			sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+		}
+	}
+
+	return &goCollector{
+		base:                 newBaseGoCollector(),
+		sampleBuf:            sampleBuf,
+		sampleMap:            sampleMap,
+		rmExposedMetrics:     metricSet,
+		rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+		msMetrics:            msMetrics,
+		msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
+	}
+}
+
+func attachOriginalName(desc, origName string) string {
+	return fmt.Sprintf("%s Sourced from %s.", desc, origName)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+	for _, m := range c.rmExposedMetrics {
+		ch <- m.Desc()
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	if len(c.sampleBuf) == 0 {
+		return
+	}
+
+	// Collect must be thread-safe, so prevent concurrent use of
+	// sampleBuf elements. Just read into sampleBuf but write all the data
+	// we get into our Metrics or MemStats.
+	//
+	// This lock also ensures that the Metrics we send out are all from
+	// the same updates, ensuring their mutual consistency insofar as
+	// is guaranteed by the runtime/metrics package.
+	//
+	// N.B. This locking is heavy-handed, but Collect is expected to be called
+	// relatively infrequently. Also the core operation here, metrics.Read,
+	// is fast (O(tens of microseconds)) so contention should certainly be
+	// low, though channel operations and any allocations may add to that.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Populate runtime/metrics sample buffer.
+	metrics.Read(c.sampleBuf)
+
+	// Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
+	for i, metric := range c.rmExposedMetrics {
+		// We created samples for exposed metrics first in order, so indexes match.
+		sample := c.sampleBuf[i]
+
+		// N.B. switch on concrete type because it's significantly more efficient
+		// than checking for the Counter and Gauge interface implementations. In
+		// this case, we control all the types here.
+		switch m := metric.(type) {
+		case *counter:
+			// Guard against decreases. This should never happen, but a failure
+			// to do so will result in a panic, which is a harsh consequence for
+			// a metrics collection bug.
+			v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+			if v1 > v0 {
+				m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+			}
+			m.Collect(ch)
+		case *gauge:
+			m.Set(unwrapScalarRMValue(sample.Value))
+			m.Collect(ch)
+		case *batchHistogram:
+			m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+			m.Collect(ch)
+		default:
+			panic("unexpected metric type")
+		}
+	}
+
+	if c.msMetricsEnabled {
+		// ms is a dummy MemStats that we populate ourselves so that we can
+		// derive the old metrics from it while msMetricsEnabled is set.
+		var ms runtime.MemStats
+		memStatsFromRM(&ms, c.sampleMap)
+		for _, i := range c.msMetrics {
+			ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+		}
+	}
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+	switch v.Kind() {
+	case metrics.KindUint64:
+		return float64(v.Uint64())
+	case metrics.KindFloat64:
+		return v.Float64()
+	case metrics.KindBad:
+		// Unsupported metric.
+		//
+		// This should never happen because we always populate our metric
+		// set from the runtime/metrics package.
+		panic("unexpected bad kind metric")
+	default:
+		// Unsupported metric kind.
+		//
+		// This should never happen because we check for this during initialization
+		// and flag and filter metrics whose kinds we don't understand.
+		panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
+	}
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum and whether
+// its exact sum exists.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+	sumName, ok := c.rmExactSumMapForHist[rmName]
+	if !ok {
+		return 0
+	}
+	s, ok := c.sampleMap[sumName]
+	if !ok {
+		return 0
+	}
+	return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+	lookupOrZero := func(name string) uint64 {
+		if s, ok := rm[name]; ok {
+			return s.Value.Uint64()
+		}
+		return 0
+	}
+
+	// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+	// The reason is that MemStats couldn't be extended at the time, but there
+	// was a desire to have Mallocs at least be a little more representative,
+	// while having Mallocs - Frees still represent a live object count.
+	// Unfortunately, MemStats doesn't actually export a large allocation count,
+	// so it's impossible to pull this number out directly.
+	tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+	ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+	ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+	ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+	ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+	ms.Lookups = 0 // Already always zero.
+	ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
+	ms.Alloc = ms.HeapAlloc
+	ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+	ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+	ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
+	ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+	ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+	ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+	ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+	ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+	ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+	ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+	ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+	ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+	ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+	ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+	ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
+
+	// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+	// and often misleading due to the fact that it's an average over the lifetime
+	// of the process.
+	// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+	// for more details.
+	ms.GCCPUFraction = 0
+}
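memStatsFromRM rebuilds the legacy MemStats view from runtime/metrics samples. As a hedged illustration of the same idea outside the package, the sketch below reads a few of the runtime/metrics names listed above directly and derives two MemStats-like values; this helper is illustrative and not part of client_golang.

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Read the runtime/metrics samples needed for a few MemStats-like values.
	// All three names exist as of Go 1.17, matching this file's build constraint.
	samples := []metrics.Sample{
		{Name: "/gc/heap/allocs:objects"},
		{Name: "/gc/heap/tiny/allocs:objects"},
		{Name: "/memory/classes/heap/objects:bytes"},
	}
	metrics.Read(samples)

	byName := map[string]metrics.Value{}
	for _, s := range samples {
		byName[s.Name] = s.Value
	}

	// Mallocs in MemStats includes tiny allocations, mirroring the mapping above.
	mallocs := byName["/gc/heap/allocs:objects"].Uint64() +
		byName["/gc/heap/tiny/allocs:objects"].Uint64()
	heapAlloc := byName["/memory/classes/heap/objects:bytes"].Uint64()

	fmt.Println("mallocs:", mallocs, "heap alloc bytes:", heapAlloc)
}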
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+	selfCollector
+
+	// Static fields updated only once.
+	desc   *Desc
+	hasSum bool
+
+	// Because this histogram operates in batches, it just uses a
+	// single mutex for everything. Updates are always serialized,
+	// but Write calls may operate concurrently with updates.
+	// Contention between these two sources should be rare.
+	mu      sync.Mutex
+	buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+	counts  []uint64
+	sum     float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+	// We need to remove -Inf values. runtime/metrics keeps them around.
+	// But a -Inf bucket is not allowed for Prometheus histograms.
+	if buckets[0] == math.Inf(-1) {
+		buckets = buckets[1:]
+	}
+	h := &batchHistogram{
+		desc:    desc,
+		buckets: buckets,
+		// Because buckets follows runtime/metrics conventions, there's
+		// 1 more value in the buckets list than there are buckets represented,
+		// because in runtime/metrics, the bucket values represent *boundaries*,
+		// and non-Inf boundaries are inclusive lower bounds for that bucket.
+		counts: make([]uint64, len(buckets)-1),
+		hasSum: hasSum,
+	}
+	h.init(h)
+	return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+	counts, buckets := his.Counts, his.Buckets
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	// Clear buckets.
+	for i := range h.counts {
+		h.counts[i] = 0
+	}
+	// Copy and reduce buckets.
+	var j int
+	for i, count := range counts {
+		h.counts[j] += count
+		if buckets[i+1] == h.buckets[j+1] {
+			j++
+		}
+	}
+	if h.hasSum {
+		h.sum = sum
+	}
+}
+
+func (h *batchHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	sum := float64(0)
+	if h.hasSum {
+		sum = h.sum
+	}
+	dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+	totalCount := uint64(0)
+	for i, count := range h.counts {
+		totalCount += count
+		if !h.hasSum {
+			if count != 0 {
+				// N.B. This computed sum is an underestimate.
+				sum += h.buckets[i] * float64(count)
+			}
+		}
+
+		// Skip the +Inf bucket, but only for the bucket list.
+		// It must still count for sum and totalCount.
+		if math.IsInf(h.buckets[i+1], 1) {
+			break
+		}
+		// Float64Histogram's upper bound is exclusive, so make it inclusive
+		// by obtaining the next float64 value down, in order.
+		upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+		dtoBuckets = append(dtoBuckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(totalCount),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+	out.Histogram = &dto.Histogram{
+		Bucket:      dtoBuckets,
+		SampleCount: proto.Uint64(totalCount),
+		SampleSum:   proto.Float64(sum),
+	}
+	return nil
+}
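Write above turns runtime/metrics bucket boundaries, whose upper bounds are exclusive, into Prometheus upper bounds, which are inclusive, by stepping one representable float64 down with math.Nextafter. A tiny illustration of that conversion (the example boundary values are arbitrary):

package main

import (
	"fmt"
	"math"
)

func main() {
	// runtime/metrics boundaries: [1, 2) is a bucket with exclusive upper bound 2.
	lower, upper := 1.0, 2.0
	// The largest float64 strictly below 2 becomes the inclusive Prometheus bound.
	inclusive := math.Nextafter(upper, lower)
	fmt.Printf("exclusive upper bound %v -> inclusive upper bound %.17g\n", upper, inclusive)
}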
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8425640..c453b75 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -14,6 +14,7 @@
 package prometheus
 
 import (
+	"errors"
 	"fmt"
 	"math"
 	"runtime"
@@ -22,25 +23,227 @@
 	"sync/atomic"
 	"time"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-
 	dto "github.com/prometheus/client_model/go"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
+const (
+	nativeHistogramSchemaMaximum = 8
+	nativeHistogramSchemaMinimum = -4
+)
+
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+	// Schema "0":
+	{0.5},
+	// Schema 1:
+	{0.5, 0.7071067811865475},
+	// Schema 2:
+	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+	// Schema 3:
+	{
+		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+	},
+	// Schema 4:
+	{
+		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+	},
+	// Schema 5:
+	{
+		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+	},
+	// Schema 6:
+	{
+		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+	},
+	// Schema 7:
+	{
+		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+	},
+	// Schema 8:
+	{
+		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+	},
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to come to terms if we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// 	// Populate nativeHistogramBounds.
+// 	numBuckets := 1
+// 	for i := range nativeHistogramBounds {
+// 		bounds := []float64{0.5}
+// 		factor := math.Exp2(math.Exp2(float64(-i)))
+// 		for j := 0; j < numBuckets-1; j++ {
+// 			var bound float64
+// 			if (j+1)%2 == 0 {
+// 				// Use previously calculated value for increased precision.
+// 				bound = nativeHistogramBounds[i-1][j/2+1]
+// 			} else {
+// 				bound = bounds[j] * factor
+// 			}
+// 			bounds = append(bounds, bound)
+// 		}
+// 		numBuckets *= 2
+// 		nativeHistogramBounds[i] = bounds
+// 	}
+// }
+
 // A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
 //
 // On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
+// the histogram_quantile PromQL function.
 //
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
+//
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
 //
 // To create Histogram instances, use NewHistogram.
 type Histogram interface {
@@ -50,7 +253,8 @@
 	// Observe adds a single observation to the histogram. Observations are
 	// usually positive or zero. Negative observations are accepted but
 	// prevent current versions of Prometheus from properly detecting
-	// counter resets in the sum of observations. See
+	// counter resets in the sum of observations. (The experimental Native
+	// Histograms handle negative observations properly.) See
 	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
 	// for details.
 	Observe(float64)
@@ -64,18 +268,28 @@
 // tailored to broadly measure the response time (in seconds) of a network
 // service. Most likely, however, you will be required to define buckets
 // customized to your use case.
-var (
-	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
 
-	errBucketLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in histograms", bucketLabel,
-	)
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
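The default zero threshold above is documented as 2^-128; a one-line check of that arithmetic (purely illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	// 1 * 2^-128, printed with full precision, matches DefNativeHistogramZeroThreshold.
	fmt.Printf("%.16g\n", math.Ldexp(1, -128)) // 2.938735877055719e-39
}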
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
 )
 
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is zero or negative.
 func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +304,11 @@
 	return buckets
 }
 
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
 // or if 'factor' is less than or equal 1.
@@ -116,6 +330,34 @@
 	return buckets
 }
 
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBucketsRange count needs a positive count")
+	}
+	if minBucket <= 0 {
+		panic("ExponentialBucketsRange min needs to be greater than 0")
+	}
+
+	// Formula for exponential buckets.
+	// max = min*growthFactor^(bucketCount-1)
+
+	// We know max/min and highest bucket. Solve for growthFactor.
+	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
+
+	// Now that we know growthFactor, solve for each bucket.
+	buckets := make([]float64, count)
+	for i := 1; i <= count; i++ {
+		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
+	}
+	return buckets
+}
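As a quick worked example of the formula above: with min=1, max=100 and count=5 the growth factor is 100^(1/4) ≈ 3.1623, so the returned buckets are approximately 1, 3.16, 10, 31.6 and 100. A short usage sketch:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Five buckets spanning [1, 100] with a constant growth factor of 100^(1/4).
	buckets := prometheus.ExponentialBucketsRange(1, 100, 5)
	fmt.Println(buckets) // approximately [1 3.1623 10 31.623 100]
}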
+
 // HistogramOpts bundles the options for creating a Histogram metric. It is
 // mandatory to set Name to a non-empty string. All other fields are optional
 // and can safely be left at their zero value, although it is strongly
@@ -152,8 +394,124 @@
 	// element in the slice is the upper inclusive bound of a bucket. The
 	// values must be sorted in strictly increasing order. There is no need
 	// to add a highest bucket with +Inf bound, it will be added
-	// implicitly. The default value is DefBuckets.
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
 	Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^-n), where n is an integer number between (and including) -4 and
+	// 8. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less or equal
+	// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+	// For best results, this should be close to a bucket boundary. This is
+	// usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To
+	// configure a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The next three fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.) Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted:
+	//  - First, if the last reset (or the creation) of the histogram is at
+	//    least NativeHistogramMinResetDuration ago, then the whole
+	//    histogram is reset to its initial state (including regular
+	//    buckets).
+	//  - If less time has passed, or if NativeHistogramMinResetDuration is
+	//    zero, no reset is performed. Instead, the zero threshold is
+	//    increased sufficiently to reduce the number of buckets to or below
+	//    NativeHistogramMaxBucketNumber, but not to more than
+	//    NativeHistogramMaxZeroThreshold. Thus, if
+	//    NativeHistogramMaxZeroThreshold is already at or below the current
+	//    zero threshold, nothing happens at this step.
+	//  - After that, if the number of buckets still exceeds
+	//    NativeHistogramMaxBucketNumber, the resolution of the histogram is
+	//    reduced by doubling the width of the sparse buckets (up to a
+	//    growth factor between one bucket to the next of 2^(2^4) = 65536,
+	//    see above).
+	//  - Any increased zero threshold or reduced resolution is reset back
+	//    to their original values once NativeHistogramMinResetDuration has
+	//    passed (since the last reset or the creation of the histogram).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
+
+	// NativeHistogramMaxExemplars limits the number of exemplars
+	// that are kept in memory for each native histogram. If you leave it at
+	// zero, a default value of 10 is used. If no exemplars should be kept specifically
+	// for native histograms, set it to a negative value. (Scrapers can
+	// still use the exemplars exposed for classic buckets, which are managed
+	// independently.)
+	NativeHistogramMaxExemplars int
+	// NativeHistogramExemplarTTL is only checked once
+	// NativeHistogramMaxExemplars is exceeded. In that case, the
+	// oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+	// Otherwise, the older exemplar in the pair of exemplars that are closest
+	// together (on an exponential scale) is removed.
+	// If NativeHistogramExemplarTTL is left at its zero value, a default value of
+	// 5m is used. To always delete the oldest exemplar, set it to a negative value.
+	NativeHistogramExemplarTTL time.Duration
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
+}
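The factor-to-schema relationship documented for NativeHistogramBucketFactor (the actual factor is 2^(2^-n) for the smallest integer n in [-4, 8] that keeps it at or below the requested factor) can be written out directly. The sketch below is an illustration of that arithmetic only; the package uses its own internal helper.

package main

import (
	"fmt"
	"math"
)

// schemaFor returns the schema n such that 2^(2^-n) is the largest factor not
// exceeding the requested bucket factor, clamped to the supported range [-4, 8].
// Illustrative only; only meaningful for bucketFactor > 1.
func schemaFor(bucketFactor float64) int {
	if bucketFactor <= 1.00271 { // approx. 2^(2^-8), the smallest supported factor
		return 8
	}
	n := int(-math.Floor(math.Log2(math.Log2(bucketFactor))))
	if n > 8 {
		n = 8
	}
	if n < -4 {
		n = -4
	}
	return n
}

func main() {
	n := schemaFor(1.1)
	fmt.Println("schema:", n) // 3
	// ≈1.0905, i.e. 8 buckets per power of two, as described in the comment above.
	fmt.Println("actual factor:", math.Exp2(math.Exp2(float64(-n))))
}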
+
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+	HistogramOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
 }
 
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -175,11 +533,11 @@
 }
 
 func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
-	if len(desc.variableLabels) != len(labelValues) {
-		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	if len(desc.variableLabels.names) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
 	}
 
-	for _, n := range desc.variableLabels {
+	for _, n := range desc.variableLabels.names {
 		if n == bucketLabel {
 			panic(errBucketLabelNotAllowed)
 		}
@@ -190,16 +548,38 @@
 		}
 	}
 
-	if len(opts.Buckets) == 0 {
-		opts.Buckets = DefBuckets
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	if opts.afterFunc == nil {
+		opts.afterFunc = time.AfterFunc
 	}
 
 	h := &histogram{
-		desc:        desc,
-		upperBounds: opts.Buckets,
-		labelPairs:  MakeLabelPairs(desc, labelValues),
-		counts:      [2]*histogramCounts{{}, {}},
-		now:         time.Now,
+		desc:                            desc,
+		upperBounds:                     opts.Buckets,
+		labelPairs:                      MakeLabelPairs(desc, labelValues),
+		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
+		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+		lastResetTime:                   opts.now(),
+		now:                             opts.now,
+		afterFunc:                       opts.afterFunc,
+	}
+	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+		h.upperBounds = DefBuckets
+	}
+	if opts.NativeHistogramBucketFactor <= 1 {
+		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+	} else {
+		switch {
+		case opts.NativeHistogramZeroThreshold > 0:
+			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		case opts.NativeHistogramZeroThreshold == 0:
+			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+		h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
 	}
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
@@ -218,8 +598,12 @@
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// for both counts as well as exemplars:
-	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
-	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
 	h.init(h) // Init self-collection.
@@ -227,13 +611,98 @@
 }
 
 type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
 	// sumBits contains the bits of the float64 representing the sum of all
-	// observations. sumBits and count have to go first in the struct to
-	// guarantee alignment for atomic operations.
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	// observations.
 	sumBits uint64
 	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less than or
+	// equal to the current threshold, see next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
 	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if the sparse buckets should be
+// updated, too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+			if math.IsInf(v, +1) {
+				v = math.MaxFloat64
+			} else {
+				v = -math.MaxFloat64
+			}
+			isInf = true
+		}
+		frac, exp := math.Frexp(math.Abs(v))
+		if schema > 0 {
+			bounds := nativeHistogramBounds[schema]
+			key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+		} else {
+			key = exp
+			if frac == 0.5 {
+				key--
+			}
+			offset := (1 << -schema) - 1
+			key = (key + offset) >> -schema
+		}
+		if isInf {
+			key++
+		}
+		switch {
+		case v > zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+		case v < -zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+		default:
+			atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+		}
+		if bucketCreated {
+			atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hc.count, 1)
 }
 
 type histogram struct {
@@ -248,7 +717,7 @@
 	// perspective of the histogram) swap the hot–cold under the writeMtx
 	// lock. A cooldown is awaited (while locked) by comparing the number of
 	// observations with the initiation count. Once they match, then the
-	// last observation on the now cool one has completed. All cool fields must
+	// last observation on the now cool one has completed. All cold fields must
 	// be merged into the new hot before releasing writeMtx.
 	//
 	// Fields with atomic access first! See alignment constraint:
@@ -256,8 +725,10 @@
 	countAndHotIdx uint64
 
 	selfCollector
-	desc     *Desc
-	writeMtx sync.Mutex // Only used in the Write method.
+	desc *Desc
+
+	// Only used in the Write method and for sparse bucket management.
+	mtx sync.Mutex
 
 	// Two counts, one is "hot" for lock-free observations, the other is
 	// "cold" for writing out a dto.Metric. It has to be an array of
@@ -265,11 +736,27 @@
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*histogramCounts
 
-	upperBounds []float64
-	labelPairs  []*dto.LabelPair
-	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	upperBounds                     []float64
+	labelPairs                      []*dto.LabelPair
+	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
+	nativeHistogramMaxZeroThreshold float64
+	nativeHistogramMaxBuckets       uint32
+	nativeHistogramMinResetDuration time.Duration
+	// lastResetTime is protected by mtx. It is also used as created timestamp.
+	lastResetTime time.Time
+	// resetScheduled is protected by mtx. It is true if a reset is
+	// scheduled for a later time (when nativeHistogramMinResetDuration has
+	// passed).
+	resetScheduled  bool
+	nativeExemplars nativeExemplars
 
-	now func() time.Time // To mock out time.Now() for testing.
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
 }
 
 func (h *histogram) Desc() *Desc {
@@ -280,6 +767,9 @@
 	h.observe(v, h.findBucket(v))
 }
 
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
 func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
 	i := h.findBucket(v)
 	h.observe(v, i)
@@ -291,8 +781,8 @@
 	// the hot path, i.e. Observe is called much more often than Write. The
 	// complication of making Write lock-free isn't worth it, if possible at
 	// all.
-	h.writeMtx.Lock()
-	defer h.writeMtx.Unlock()
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
 
 	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
 	// without touching the count bits. See the struct comments for a full
@@ -305,16 +795,17 @@
 	hotCounts := h.counts[n>>63]
 	coldCounts := h.counts[(^n)>>63]
 
-	// Await cooldown.
-	for count != atomic.LoadUint64(&coldCounts.count) {
-		runtime.Gosched() // Let observations get work done.
-	}
+	waitForCooldown(count, coldCounts)
 
 	his := &dto.Histogram{
-		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
-		SampleCount: proto.Uint64(count),
-		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		Bucket:           make([]*dto.Bucket, len(h.upperBounds)),
+		SampleCount:      proto.Uint64(count),
+		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		CreatedTimestamp: timestamppb.New(h.lastResetTime),
 	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -335,68 +826,330 @@
 		}
 		his.Bucket = append(his.Bucket, b)
 	}
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
 
-	out.Histogram = his
-	out.Label = h.labelPairs
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
 
-	// Finally add all the cold counts to the new hot counts and reset the cold counts.
-	atomic.AddUint64(&hotCounts.count, count)
-	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+		// Add a no-op span to a histogram without observations and with
+		// a zero threshold of zero. Otherwise, a native histogram would
+		// look like a classic histogram to scrapers.
+		if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+			his.PositiveSpan = []*dto.BucketSpan{{
+				Offset: proto.Int32(0),
+				Length: proto.Uint32(0),
+			}}
 		}
+
+		if h.nativeExemplars.isEnabled() {
+			h.nativeExemplars.Lock()
+			his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+			h.nativeExemplars.Unlock()
+		}
+
 	}
-	for i := range h.upperBounds {
-		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
-		atomic.StoreUint64(&coldCounts.buckets[i], 0)
-	}
+	addAndResetCounts(hotCounts, coldCounts)
 	return nil
 }
 
 // findBucket returns the index of the bucket for the provided value, or
 // len(h.upperBounds) for the +Inf bucket.
 func (h *histogram) findBucket(v float64) int {
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
-	// slightly faster than the binary search. If we really care, we could
-	// switch from one search strategy to the other depending on the number
-	// of buckets.
-	//
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small slices, use a simple linear search.
+	// The "magic number" 35 is the result of tests on a couple of different
+	// (AWS and bare-metal) servers; see more details here:
+	// https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
 	return sort.SearchFloat64s(h.upperBounds, v)
 }
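+
+// exampleFindBucket is an illustrative sketch added for clarity; it is not
+// part of upstream client_golang. With classic upper bounds {0.1, 1, 10},
+// findBucket returns 1 for an observation of 0.5 and 3 (len(upperBounds),
+// i.e. the implicit +Inf bucket) for an observation of 42.
+func exampleFindBucket() (int, int) {
+	h := &histogram{upperBounds: []float64{0.1, 1, 10}}
+	return h.findBucket(0.5), h.findBucket(42) // 1, 3
+}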
 
 // observe is the implementation for Observe without the findBucket part.
 func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
 	// We increment h.countAndHotIdx so that the counter in the lower
 	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
 	n := atomic.AddUint64(&h.countAndHotIdx, 1)
 	hotCounts := h.counts[n>>63]
-
-	if bucket < len(h.upperBounds) {
-		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
 	}
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
-		}
-	}
-	// Increment count last as we take it as a signal that the observation
-	// is complete.
-	atomic.AddUint64(&hotCounts.count, 1)
 }
 
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
+	}
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	// One of the other strategies will be applied. To undo its effect as
+	// soon as enough time has passed to satisfy
+	// h.nativeHistogramMinResetDuration, schedule a reset at the right time
+	// if we haven't done so already.
+	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+		h.resetScheduled = true
+		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+	}
+
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
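+
+// exampleBucketLimitOpts is an illustrative sketch added for clarity; it is
+// not part of upstream client_golang, and it assumes the HistogramOpts fields
+// carry the names shown here. It wires up the knobs that drive the limiting
+// strategies above: a bucket cap, a reset interval, and a maximum zero
+// threshold for widening.
+func exampleBucketLimitOpts() Histogram {
+	return NewHistogram(HistogramOpts{
+		Name:                            "example_duration_seconds",
+		Help:                            "Illustrative native histogram.",
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: time.Hour,
+		NativeHistogramMaxZeroThreshold: 0.001,
+	})
+}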
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+		h.resetScheduled || // Do not interfere if a reset is already scheduled.
+		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hot := h.counts[hotIdx]
+	cold := h.counts[coldIdx]
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	h.resetScheduled = false
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if equidistant
+// negative and positive buckets exist, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+	}
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// Remove applicable buckets.
+	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	// Make cold counts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the new zero threshold in the cold counts, too...
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// ...and then merge the newly deleted buckets into the wider zero
+	// bucket.
+	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			if key == smallestKey {
+				// Merge into hot zero bucket...
+				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+				// ...and delete from cold counts.
+				coldBuckets.Delete(key)
+				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+			} else {
+				// Add to corresponding hot bucket...
+				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+				}
+				// ...and reset cold bucket.
+				atomic.StoreInt64(bucket, 0)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+	return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+	if coldSchema == -4 {
+		return // Already at lowest resolution.
+	}
+	coldSchema--
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// Play it simple and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+	// Make coldCounts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the schema in the cold counts, too...
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// ...and then merge the cold buckets into the wider hot buckets.
+	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			// Adjust key to match the bucket to merge into.
+			if key > 0 {
+				key++
+			}
+			key /= 2
+			// Add to corresponding hot bucket.
+			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+	// Play it simple again and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
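+
+// exampleDoubleWidthKey is an illustrative sketch added for clarity; it is
+// not part of upstream client_golang. It isolates the key adjustment used in
+// the merge above: when the schema is decremented, keys 5 and 6 (upper bounds
+// 2^(5/4) and 2^(6/4) at schema 2) both collapse into key 3 (upper bound
+// 2^(3/2) at schema 1).
+func exampleDoubleWidthKey(key int) int {
+	if key > 0 {
+		key++
+	}
+	return key / 2 // e.g. 5 -> 3 and 6 -> 3
+}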
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+	atomic.StoreUint64(&counts.sumBits, 0)
+	atomic.StoreUint64(&counts.count, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+	for i := range h.upperBounds {
+		atomic.StoreUint64(&counts.buckets[i], 0)
+	}
+	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
+}
+
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is a native histogram, the exemplar is also cached in
+// nativeExemplars, which has a limit and evicts one exemplar when the limit
+// is reached.
 func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
 	if l == nil {
 		return
@@ -406,6 +1159,10 @@
 		panic(err)
 	}
 	h.exemplars[bucket].Store(e)
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	if doSparse {
+		h.nativeExemplars.addExemplar(e)
+	}
 }
 
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -420,15 +1177,23 @@
 // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
 // partitioned by the given label names.
 func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
-	desc := NewDesc(
+	return V2.NewHistogramVec(HistogramVecOpts{
+		HistogramOpts:  opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &HistogramVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			return newHistogram(desc, opts, lvs...)
+			return newHistogram(desc, opts.HistogramOpts, lvs...)
 		}),
 	}
 }
@@ -488,7 +1253,8 @@
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
 	h, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -499,7 +1265,8 @@
 
 // With works as GetMetricWith but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (v *HistogramVec) With(labels Labels) Observer {
 	h, err := v.GetMetricWith(labels)
 	if err != nil {
@@ -545,6 +1312,7 @@
 	sum        float64
 	buckets    map[float64]uint64
 	labelPairs []*dto.LabelPair
+	createdTs  *timestamppb.Timestamp
 }
 
 func (h *constHistogram) Desc() *Desc {
@@ -552,12 +1320,14 @@
 }
 
 func (h *constHistogram) Write(out *dto.Metric) error {
-	his := &dto.Histogram{}
+	his := &dto.Histogram{
+		CreatedTimestamp: h.createdTs,
+	}
+
 	buckets := make([]*dto.Bucket, 0, len(h.buckets))
 
 	his.SampleCount = proto.Uint64(h.count)
 	his.SampleSum = proto.Float64(h.sum)
-
 	for upperBound, count := range h.buckets {
 		buckets = append(buckets, &dto.Bucket{
 			CumulativeCount: proto.Uint64(count),
@@ -585,7 +1355,7 @@
 // to send it to Prometheus in the Collect method.
 //
 // buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
 //
 // NewConstHistogram returns an error if the length of labelValues is not
 // consistent with the variable labels in Desc or if Desc is invalid.
@@ -599,7 +1369,7 @@
 	if desc.err != nil {
 		return nil, desc.err
 	}
-	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
 		return nil, err
 	}
 	return &constHistogram{
@@ -627,6 +1397,48 @@
 	return m
 }
 
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 type buckSort []*dto.Bucket
 
 func (s buckSort) Len() int {
@@ -640,3 +1452,605 @@
 func (s buckSort) Less(i, j int) bool {
 	return s[i].GetUpperBound() < s[j].GetUpperBound()
 }
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less than or equal to the provided bucketFactor.
+//
+// Special cases:
+//   - bucketFactor <= 1: panics.
+//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return nativeHistogramSchemaMaximum
+	case floor >= 4:
+		return nativeHistogramSchemaMinimum
+	default:
+		return -int32(floor)
+	}
+}
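+
+// examplePickSchema is an illustrative sketch added for clarity; it is not
+// part of upstream client_golang. A bucket factor of 1.1 yields schema 3
+// (buckets actually grow by a factor of 2^(2^-3) ≈ 1.09), while a factor of
+// 2 yields schema 0 (power-of-two buckets).
+func examplePickSchema() (int32, int32) {
+	return pickSchema(1.1), pickSchema(2) // 3, 0
+}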
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with a key
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
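+
+// exampleGetLe is an illustrative sketch added for clarity; it is not part of
+// upstream client_golang. For schema 0, key 0 has the upper bound 1 (matching
+// the "index 0 is for an upper bound of 1" convention), key 2 has the upper
+// bound 4, and key -1 has the upper bound 0.5.
+func exampleGetLe() (float64, float64, float64) {
+	return getLe(0, 0), getLe(2, 0), getLe(-1, 0) // 1, 4, 0.5
+}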
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+	for {
+		loadedBits := atomic.LoadUint64(bits)
+		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+			break
+		}
+	}
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to.  See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+	atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+	atomic.StoreUint64(&cold.count, 0)
+	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+	atomicAddFloat(&hot.sumBits, coldSum)
+	atomic.StoreUint64(&cold.sumBits, 0)
+	for i := range hot.buckets {
+		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+		atomic.StoreUint64(&cold.buckets[i], 0)
+	}
+	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
+
+type nativeExemplars struct {
+	sync.Mutex
+
+	// Time-to-live for exemplars. It is set to -1 if exemplars are disabled,
+	// that is, if NativeHistogramMaxExemplars is below 0.
+	// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+	ttl time.Duration
+
+	exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+	return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+		ttl = -1
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
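+
+// exampleNativeExemplarDefaults is an illustrative sketch added for clarity;
+// it is not part of upstream client_golang. A zero TTL and a zero max count
+// fall back to 5 minutes and 10 exemplars, while a negative max count
+// disables exemplar storage entirely (ttl == -1).
+func exampleNativeExemplarDefaults() (bool, bool) {
+	defaults := makeNativeExemplars(0, 0)  // ttl 5m, capacity 10
+	disabled := makeNativeExemplars(0, -1) // ttl -1, capacity 0
+	return defaults.isEnabled(), disabled.isEnabled() // true, false
+}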
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if !n.isEnabled() {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// As long as the number of exemplars is still below cap(n.exemplars),
+	// insert the new exemplar directly, keeping the slice sorted by value.
+	if len(n.exemplars) < cap(n.exemplars) {
+		var nIdx int
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	if len(n.exemplars) == 1 {
+		// When there is only room for a single exemplar,
+		// replace the existing exemplar with the new one.
+		n.exemplars[0] = e
+		return
+	}
+	// From this point on, the number of exemplars is greater than 1.
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+		otIdx = -1          // Index of the exemplar with the oldest timestamp.
+
+		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+		// The insertion point of the new exemplar in the exemplars slice after insertion.
+		// This is calculated purely based on the order of the exemplars by value.
+		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+		nIdx = -1
+
+		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+		// It is calculated in 3 steps:
+		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+		//      That is the following will be true (on log scale):
+		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+		//      the closest values to each other from all pairs.
+		//      For example, suppose the values are distributed like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                   ^--rIdx as this is older.
+		//      Or like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                        ^--rIdx as this is older.
+		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+		//   3. We check if by inserting the new exemplar we would create a closer pair at
+		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+		rIdx = -1
+		cLog float64 // Logarithm of the current exemplar.
+		pLog float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if nIdx == -1 && *e.Value <= *exemplar.Value {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			// The closest exemplar pair is at index: i-1, i.
+			// Choose the exemplar with the older timestamp for replacement.
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				rIdx = i
+			} else {
+				rIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the new exemplar should be inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+	// Here, we have the following relationships:
+	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		// If the oldest exemplar has expired, then replace it with the new exemplar.
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+		elog := math.Log(e.GetValue())
+		if nIdx > 0 {
+			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+			if diff < md {
+				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+				//                                            v--rIdx
+				// |-----------x-n-----------x----------------x----x-----|
+				//     nIdx-1--^ ^--new exemplar value
+				// Do not make the spread worse, replace nIdx-1 and not rIdx.
+				md = diff
+				rIdx = nIdx - 1
+			}
+		}
+		if nIdx < len(n.exemplars) {
+			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+			if diff < md {
+				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+				//                                            v--rIdx
+				// |-----------x-----------n-x----------------x----x-----|
+				//     new exemplar value--^ ^--nIdx
+				// Do not make the spread worse, replace nIdx and not rIdx.
+				rIdx = nIdx
+			}
+		}
+	}
+
+	// Adjust the slice according to rIdx and nIdx.
+	switch {
+	case rIdx == nIdx:
+		n.exemplars[nIdx] = e
+	case rIdx < nIdx:
+		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+	case rIdx > nIdx:
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+	}
+}
+
+type constNativeHistogram struct {
+	desc *Desc
+	dto.Histogram
+	labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+	var bucketPopulationSum int64
+	for _, v := range positiveBuckets {
+		bucketPopulationSum += v
+	}
+	for _, v := range negativeBuckets {
+		bucketPopulationSum += v
+	}
+	bucketPopulationSum += int64(zeroBucket)
+
+	// If the sum of observations is NaN, the number of observations must be
+	// greater than or equal to the sum of all bucket counts.
+	// Otherwise, the number of observations must be equal to the sum of all
+	// bucket counts.
+
+	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+		return errors.New("the sum of all bucket populations exceeds the count of observations")
+	}
+	return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is
+// generated on the fly and sent to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative) observations in the zero
+// bucket (with an absolute value less than or equal to the current threshold).
+// positiveBuckets and negativeBuckets are separate maps for positive and
+// negative observations. Each map's value is an int64, counting observations
+// in that bucket. Each map's key is the index of the bucket according to the
+// used Schema. Index 0 is for an upper bound of 1 in positive buckets and for
+// a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+//   - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
+//   - the schema passed is not between -4 and 8
+//   - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
+func NewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	schema int32,
+	zeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
+		return nil, errors.New("invalid native histogram schema")
+	}
+	if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
+		return nil, err
+	}
+
+	NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
+	PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
+	ret := &constNativeHistogram{
+		desc: desc,
+		Histogram: dto.Histogram{
+			CreatedTimestamp: timestamppb.New(createdTimestamp),
+			Schema:           &schema,
+			ZeroThreshold:    &zeroThreshold,
+			SampleCount:      &count,
+			SampleSum:        &sum,
+
+			NegativeSpan:  NegativeSpan,
+			NegativeDelta: NegativeDelta,
+
+			PositiveSpan:  PositiveSpan,
+			PositiveDelta: PositiveDelta,
+
+			ZeroCount: proto.Uint64(zeroBucket),
+		},
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}
+	if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
+		ret.PositiveSpan = []*dto.BucketSpan{{
+			Offset: proto.Int32(0),
+			Length: proto.Uint32(0),
+		}}
+	}
+	return ret, nil
+}
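+
+// exampleConstNativeHistogram is an illustrative sketch added for clarity; it
+// is not part of upstream client_golang. The bucket populations (7+2 in the
+// positive buckets plus 1 in the zero bucket) must add up to the count of 10
+// for the validation above to pass.
+func exampleConstNativeHistogram() (Metric, error) {
+	desc := NewDesc("example_duration_seconds", "Illustrative native histogram.", nil, nil)
+	return NewConstNativeHistogram(
+		desc,
+		10,                        // count
+		4.2,                       // sum
+		map[int]int64{0: 7, 1: 2}, // positive buckets
+		nil,                       // negative buckets
+		1,                         // zero bucket
+		3,                         // schema
+		1e-128,                    // zero threshold
+		time.Now(),                // created timestamp
+	)
+}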
+
+// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
+// NewConstNativeHistogram would have returned an error.
+func MustNewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	nativeHistogramSchema int32,
+	nativeHistogramZeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) Metric {
+	nativehistogram, err := NewConstNativeHistogram(desc,
+		count,
+		sum,
+		positiveBuckets,
+		negativeBuckets,
+		zeroBucket,
+		nativeHistogramSchema,
+		nativeHistogramZeroThreshold,
+		createdTimestamp,
+		labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return nativehistogram
+}
+
+func (h *constNativeHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constNativeHistogram) Write(out *dto.Metric) error {
+	out.Histogram = &h.Histogram
+	out.Label = h.labelPairs
+	return nil
+}
+
+func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
+	if len(buckets) == 0 {
+		return nil, nil
+	}
+	var ii []int
+	for k := range buckets {
+		ii = append(ii, k)
+	}
+	sort.Ints(ii)
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		count := buckets[i]
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
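+
+// exampleSpansAndDeltas is an illustrative sketch added for clarity; it is
+// not part of upstream client_golang. Populated bucket indexes {1, 2, 6} with
+// counts {5, 3, 2} encode as two spans (offset 1, length 2 and offset 3,
+// length 1) plus the deltas [5, -2, -1], each relative to the previous
+// bucket's count.
+func exampleSpansAndDeltas() ([]*dto.BucketSpan, []int64) {
+	return makeBucketsFromMap(map[int]int64{1: 5, 2: 3, 6: 2})
+}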
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 0000000..1ed5abe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+	"math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+	if a == b {
+		return true
+	}
+	absA := math.Abs(a)
+	absB := math.Abs(b)
+	diff := math.Abs(a - b)
+	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+		return diff < epsilon*minNormalFloat64
+	}
+	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+			return false
+		}
+	}
+	return true
+}
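+
+// exampleAlmostEqual is an illustrative sketch added for clarity; it is not
+// part of the original beorn7/floats code. Typical float64 rounding noise,
+// such as 0.1+0.2 vs 0.3, compares equal under a relative epsilon of 1e-12.
+func exampleAlmostEqual() bool {
+	a, b := 0.1, 0.2
+	return AlmostEqualFloat64(a+b, 0.3, 1e-12) // true, although a+b != 0.3 exactly
+}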
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000..7bac0da
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,655 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool,
+) *SequenceMatcher {
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+//	alo <= i <= i+k <= ahi
+//	blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+//	k >= k'
+//	i <= i'
+//	and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
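+
+// For example, with single-string elements (an illustrative sketch; the
+// inputs are arbitrary and NewMatcher is the SequenceMatcher constructor):
+//
+//	m := NewMatcher([]string{"a", "b", "x", "c", "d"}, []string{"a", "b", "c", "d"})
+//	m.GetMatchingBlocks()
+//	// -> (0, 0, 2), (3, 2, 2) and the dummy (5, 4, 0)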
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
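+
+// For example, with the same sketch inputs as above:
+//
+//	m := NewMatcher([]string{"a", "b", "x", "c", "d"}, []string{"a", "b", "c", "d"})
+//	m.GetOpCodes()
+//	// -> ('e', 0, 2, 0, 2), ('d', 2, 3, 2, 2), ('e', 3, 5, 2, 4)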
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a list of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{
+				c.Tag, i1, minInt(i2, i1+n),
+				j1, minInt(j2, j1+n),
+			})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on .Ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s]++
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches++
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on .Ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+	la, lb := len(m.a), len(m.b)
+	return calculateRatio(minInt(la, lb), la+lb)
+}
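+
+// For example (an illustrative sketch with arbitrary single-string elements):
+//
+//	m := NewMatcher([]string{"a", "b", "c", "d"}, []string{"b", "c", "d", "e"})
+//	m.Ratio()          // 0.75 (3 matching elements, 8 elements in total)
+//	m.QuickRatio()     // 0.75
+//	m.RealQuickRatio() // 1.0 (only the lengths are compared, so the bound is loosest)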
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return strconv.Itoa(beginning)
+	}
+	if length == 0 {
+		beginning-- // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
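+
+// For example, the helper renders ranges as follows:
+//
+//	formatRangeUnified(4, 8) // "5,4" (lines 5-8)
+//	formatRangeUnified(3, 4) // "4"   (a single line)
+//	formatRangeUnified(3, 3) // "3,0" (an empty range after line 3)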
+
+// Unified diff parameters
+type UnifiedDiff struct {
+	A        []string // First sequence lines
+	FromFile string   // First file name
+	FromDate string   // First file time
+	B        []string // Second sequence lines
+	ToFile   string   // Second file name
+	ToDate   string   // Second file time
+	Eol      string   // Headers end of line, defaults to LF
+	Context  int      // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context.  The number of context lines is set by the Context
+// field.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline.  This is helpful so that inputs
+// created with SplitLines() result in diffs that can be concatenated and
+// written out verbatim, since both the inputs and the outputs keep their
+// trailing newlines.
+//
+// For inputs that do not have trailing newlines, the emitted content
+// lines will not have trailing newlines either; the control lines always
+// end with Eol, which defaults to "\n".
+//
+// The unidiff format normally has a header with the file names and
+// modification times.  Any or all of these may be specified using the
+// FromFile, ToFile, FromDate, and ToDate fields.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := fmt.Fprintf(buf, format, args...)
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
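+
+// A minimal usage sketch (the file names and contents below are arbitrary):
+//
+//	diff := UnifiedDiff{
+//		A:        SplitLines("one\ntwo\nthree\n"),
+//		B:        SplitLines("one\nthree\nfour\n"),
+//		FromFile: "before.txt",
+//		ToFile:   "after.txt",
+//		Context:  3,
+//	}
+//	text, err := GetUnifiedDiffString(diff)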
+
+// Split a string on "\n", keeping the newline at the end of each line. The
+// output can be used as input for the UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
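+
+// For example:
+//
+//	SplitLines("one\ntwo\n") // []string{"one\n", "two\n", "\n"}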
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 0000000..a4fa6ea
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+	Matcher *regexp.Regexp
+	Deny    bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the `collectors` package.
+// Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the `collector.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+	DisableMemStatsLikeMetrics bool
+	RuntimeMetricSumForHist    map[string]string
+	RuntimeMetricRules         []GoCollectorRule
+}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
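+
+// A sketch of a rule (the pattern below is arbitrary; external users should
+// configure rules via the collectors package rather than building them here):
+//
+//	var denySchedLatencies = GoCollectorRule{
+//		Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
+//		Deny:    true,
+//	}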
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 0000000..d273b66
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,143 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+	"math"
+	"path"
+	"runtime/metrics"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+	namespace := "go"
+
+	comp := strings.SplitN(d.Name, ":", 2)
+	key := comp[0]
+	unit := comp[1]
+
+	// The last path element in the key is the name,
+	// the rest is the subsystem.
+	subsystem := path.Dir(key[1:] /* remove leading / */)
+	name := path.Base(key)
+
+	// subsystem is translated by replacing all / and - with _.
+	subsystem = strings.ReplaceAll(subsystem, "/", "_")
+	subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+	// unit is translated assuming that the unit contains no
+	// non-ASCII characters.
+	unit = strings.ReplaceAll(unit, "-", "_")
+	unit = strings.ReplaceAll(unit, "*", "_")
+	unit = strings.ReplaceAll(unit, "/", "_per_")
+
+	// name has - replaced with _ and is concatenated with the unit and
+	// other data.
+	name = strings.ReplaceAll(name, "-", "_")
+	name += "_" + unit
+	if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+		name += "_total"
+	}
+
+	// Our current conversion moves to legacy naming, so use legacy validation.
+	valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
+	switch d.Kind {
+	case metrics.KindUint64:
+	case metrics.KindFloat64:
+	case metrics.KindFloat64Histogram:
+	default:
+		valid = false
+	}
+	return namespace, subsystem, name, valid
+}
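+
+// For example, for the runtime/metrics description of "/gc/heap/allocs:bytes"
+// (cumulative, KindUint64) the conversion yields:
+//
+//	namespace == "go"
+//	subsystem == "gc_heap"
+//	name      == "allocs_bytes_total" // i.e. go_gc_heap_allocs_bytes_total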
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+	switch unit {
+	case "bytes":
+		// Re-bucket as powers of 2.
+		return reBucketExp(buckets, 2)
+	case "seconds":
+		// Re-bucket as powers of 10 and then merge all buckets greater
+		// than 1 second into the +Inf bucket.
+		b := reBucketExp(buckets, 10)
+		for i := range b {
+			if b[i] <= 1 {
+				continue
+			}
+			b[i] = math.Inf(1)
+			b = b[:i+1]
+			break
+		}
+		return b
+	}
+	return buckets
+}
+
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func reBucketExp(buckets []float64, base float64) []float64 {
+	bucket := buckets[0]
+	var newBuckets []float64
+	// We may see a -Inf here, in which case, add it and skip it
+	// since we risk producing NaNs otherwise.
+	//
+	// We need to preserve -Inf values to maintain runtime/metrics
+	// conventions. We'll strip it out later.
+	if bucket == math.Inf(-1) {
+		newBuckets = append(newBuckets, bucket)
+		buckets = buckets[1:]
+		bucket = buckets[0]
+	}
+	// From now on, bucket should always have a non-Inf value because
+	// Infs are only ever at the ends of the bucket lists, so
+	// arithmetic operations on it are non-NaN.
+	for i := 1; i < len(buckets); i++ {
+		if bucket >= 0 && buckets[i] < bucket*base {
+			// The next bucket we want to include is at least bucket*base.
+			continue
+		} else if bucket < 0 && buckets[i] < bucket/base {
+			// In this case the bucket we're targeting is negative, and since
+			// we're ascending through buckets here, we need to divide to get
+			// closer to zero exponentially.
+			continue
+		}
+		// The +Inf bucket will always be the last one, and we'll always
+		// end up including it here: +Inf never compares less than the
+		// finite threshold above, so it is never skipped and ends up as
+		// the final bucket appended after the loop.
+		newBuckets = append(newBuckets, bucket)
+		bucket = buckets[i]
+	}
+	return append(newBuckets, bucket)
+}
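+
+// For example, downsampling to (roughly) powers of two:
+//
+//	reBucketExp([]float64{1, 2, 3, 4, 5, 6, 7, 8}, 2) // [1, 2, 4, 8]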
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
index 351c26e..6515c11 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -19,18 +19,34 @@
 	dto "github.com/prometheus/client_model/go"
 )
 
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
 
-func (s metricSorter) Len() int {
+func (s LabelPairSorter) Len() int {
 	return len(s)
 }
 
-func (s metricSorter) Swap(i, j int) {
+func (s LabelPairSorter) Swap(i, j int) {
 	s[i], s[j] = s[j], s[i]
 }
 
-func (s metricSorter) Less(i, j int) bool {
+func (s LabelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+	return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
 	if len(s[i].Label) != len(s[j].Label) {
 		// This should not happen. The metrics are
 		// inconsistent. However, we have to deal with the fact, as
@@ -68,7 +84,7 @@
 // the slice, with the contained Metrics sorted within each MetricFamily.
 func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
 	for _, mf := range metricFamiliesByName {
-		sort.Sort(metricSorter(mf.Metric))
+		sort.Sort(MetricSorter(mf.Metric))
 	}
 	names := make([]string, 0, len(metricFamiliesByName))
 	for name, mf := range metricFamiliesByName {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 2744443..5fe8d3b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -25,12 +25,111 @@
 // Labels represents a collection of label name -> value mappings. This type is
 // commonly used with the With(Labels) and GetMetricWith(Labels) methods of
 // metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
 //
 // The other use-case is the specification of constant label pairs in Opts or to
 // create a Desc.
 type Labels map[string]string
 
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and its constraint function
+// used to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+	Name       string
+	Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows creating labels that can
+// optionally be constrained.
+//
+//	prometheus.V2().NewCounterVec(CounterVecOpts{
+//	  CounterOpts: {...}, // Usual CounterOpts fields
+//	  VariableLabels: ConstrainedLabels{
+//	    {Name: "A"},
+//	    {Name: "B", Constraint: func(v string) string { ... }},
+//	  },
+//	})
+type ConstrainableLabels interface {
+	compile() *compiledLabels
+	labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint function
+// mappings used to normalize label values. This type is commonly used when
+// constructing metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+	compiled := &compiledLabels{
+		names:            make([]string, len(cls)),
+		labelConstraints: map[string]LabelConstraint{},
+	}
+
+	for i, label := range cls {
+		compiled.names[i] = label.Name
+		if label.Constraint != nil {
+			compiled.labelConstraints[label.Name] = label.Constraint
+		}
+	}
+
+	return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+	names := make([]string, len(cls))
+	for i, label := range cls {
+		names[i] = label.Name
+	}
+	return names
+}
+
+// UnconstrainedLabels represents a collection of labels without any constraints
+// on their values. Thus, it is simply a collection of label names.
+//
+//	UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+//	ConstrainedLabels {
+//	  { Name: "A" },
+//	  { Name: "B" },
+//	}
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+	return &compiledLabels{
+		names: uls,
+	}
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+	return uls
+}
+
+type compiledLabels struct {
+	names            []string
+	labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+	return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+	return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+	if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+		return fn(value)
+	}
+	return value
+}
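+
+// A small sketch of how constraints are applied (strings.ToLower stands in for
+// an arbitrary LabelConstraint):
+//
+//	cls := ConstrainedLabels{
+//		{Name: "method", Constraint: strings.ToLower},
+//		{Name: "code"},
+//	}
+//	compiled := cls.compile()
+//	compiled.constrain("method", "GET") // "get"
+//	compiled.constrain("code", "404")   // "404" (no constraint registered)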
+
 // reservedLabelPrefix is a prefix which is not legal in user-supplied
 // label names.
 const reservedLabelPrefix = "__"
@@ -39,7 +138,7 @@
 
 func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
 	return fmt.Errorf(
-		"%s: %q has %d variable labels named %q but %d values %q were provided",
+		"%w: %q has %d variable labels named %q but %d values %q were provided",
 		errInconsistentCardinality, fqName,
 		len(labels), labels,
 		len(labelValues), labelValues,
@@ -49,7 +148,7 @@
 func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
 	if len(labels) != expectedNumberOfValues {
 		return fmt.Errorf(
-			"%s: expected %d label values but got %d in %#v",
+			"%w: expected %d label values but got %d in %#v",
 			errInconsistentCardinality, expectedNumberOfValues,
 			len(labels), labels,
 		)
@@ -66,8 +165,10 @@
 
 func validateLabelValues(vals []string, expectedNumberOfValues int) error {
 	if len(vals) != expectedNumberOfValues {
+		// The call below makes vals escape; copy them to avoid that.
+		vals := append([]string(nil), vals...)
 		return fmt.Errorf(
-			"%s: expected %d label values but got %d in %#v",
+			"%w: expected %d label values but got %d in %#v",
 			errInconsistentCardinality, expectedNumberOfValues,
 			len(vals), vals,
 		)
@@ -83,5 +184,6 @@
 }
 
 func checkLabelName(l string) bool {
-	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index dc12191..76e59f1 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -14,14 +14,15 @@
 package prometheus
 
 import (
+	"errors"
+	"math"
+	"sort"
 	"strings"
 	"time"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"
-
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
 )
 
 var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
@@ -91,6 +92,9 @@
 	// machine_role metric). See also
 	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
 	ConstLabels Labels
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
 }
 
 // BuildFQName joins the given three name components by "_". Empty name
@@ -104,31 +108,23 @@
 	if name == "" {
 		return ""
 	}
-	switch {
-	case namespace != "" && subsystem != "":
-		return strings.Join([]string{namespace, subsystem, name}, "_")
-	case namespace != "":
-		return strings.Join([]string{namespace, name}, "_")
-	case subsystem != "":
-		return strings.Join([]string{subsystem, name}, "_")
+
+	sb := strings.Builder{}
+	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+	if namespace != "" {
+		sb.WriteString(namespace)
+		sb.WriteString("_")
 	}
-	return name
-}
 
-// labelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type labelPairSorter []*dto.LabelPair
+	if subsystem != "" {
+		sb.WriteString(subsystem)
+		sb.WriteString("_")
+	}
 
-func (s labelPairSorter) Len() int {
-	return len(s)
-}
+	sb.WriteString(name)
 
-func (s labelPairSorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s labelPairSorter) Less(i, j int) bool {
-	return s[i].GetName() < s[j].GetName()
+	return sb.String()
 }
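+
+// For example:
+//
+//	BuildFQName("http", "server", "requests_total") // "http_server_requests_total"
+//	BuildFQName("", "", "up")                       // "up"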
 
 type invalidMetric struct {
@@ -174,3 +170,107 @@
 func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
 	return timestampedMetric{Metric: m, t: t}
 }
+
+type withExemplarsMetric struct {
+	Metric
+
+	exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+	if err := m.Metric.Write(pb); err != nil {
+		return err
+	}
+
+	switch {
+	case pb.Counter != nil:
+		pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+	case pb.Histogram != nil:
+		h := pb.Histogram
+		for _, e := range m.exemplars {
+			if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+				len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+				e.GetTimestamp() != nil {
+				h.Exemplars = append(h.Exemplars, e)
+				if len(h.Bucket) == 0 {
+					// Don't proceed to classic buckets if there are none.
+					continue
+				}
+			}
+			// h.Bucket are sorted by UpperBound.
+			i := sort.Search(len(h.Bucket), func(i int) bool {
+				return h.Bucket[i].GetUpperBound() >= e.GetValue()
+			})
+			if i < len(h.Bucket) {
+				h.Bucket[i].Exemplar = e
+			} else {
+				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+				b := &dto.Bucket{
+					CumulativeCount: proto.Uint64(h.GetSampleCount()),
+					UpperBound:      proto.Float64(math.Inf(1)),
+					Exemplar:        e,
+				}
+				h.Bucket = append(h.Bucket, b)
+			}
+		}
+	default:
+		// TODO(bwplotka): Implement Gauge?
+		return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+	}
+
+	return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+	Value  float64
+	Labels Labels
+	// Optional.
+	// The default value (time.Time{}) indicates it is unset, which should be
+	// understood as time.Now() at the moment the metric was created.
+	Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, it means the last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+	if len(exemplars) == 0 {
+		return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+	}
+
+	var (
+		now = time.Now()
+		exs = make([]*dto.Exemplar, len(exemplars))
+		err error
+	)
+	for i, e := range exemplars {
+		ts := e.Timestamp
+		if ts.IsZero() {
+			ts = now
+		}
+		exs[i], err = newExemplar(e.Value, ts, e.Labels)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+	ret, err := NewMetricWithExemplars(m, exemplars...)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
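+
+// A usage sketch (desc is assumed to be an existing *Desc for a counter):
+//
+//	m := MustNewConstMetric(desc, CounterValue, 42)
+//	m = MustNewMetricWithExemplars(m, Exemplar{
+//		Value:  42,
+//		Labels: Labels{"trace_id": "abc123"},
+//	})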
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 0000000..7c12b21
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+	n, _ := runtime.ThreadCreateProfile(nil)
+	return float64(n)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 0000000..7348df0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+	return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
index 4412801..03773b2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -58,7 +58,7 @@
 // current time as timestamp, and the provided Labels. Empty Labels will lead to
 // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
 // left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 64 runes in total.
+// invalid or if the provided labels contain more than 128 runes in total.
 type ExemplarObserver interface {
 	ObserveWithExemplar(value float64, exemplar Labels)
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 5bfe0ff..e7bce8b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -16,21 +16,22 @@
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strconv"
 	"strings"
 )
 
 type processCollector struct {
-	collectFn       func(chan<- Metric)
-	pidFn           func() (int, error)
-	reportErrors    bool
-	cpuTotal        *Desc
-	openFDs, maxFDs *Desc
-	vsize, maxVsize *Desc
-	rss             *Desc
-	startTime       *Desc
+	collectFn         func(chan<- Metric)
+	describeFn        func(chan<- *Desc)
+	pidFn             func() (int, error)
+	reportErrors      bool
+	cpuTotal          *Desc
+	openFDs, maxFDs   *Desc
+	vsize, maxVsize   *Desc
+	rss               *Desc
+	startTime         *Desc
+	inBytes, outBytes *Desc
 }
 
 // ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -101,11 +102,20 @@
 			"Start time of the process since unix epoch in seconds.",
 			nil, nil,
 		),
+		inBytes: NewDesc(
+			ns+"process_network_receive_bytes_total",
+			"Number of bytes received by the process over the network.",
+			nil, nil,
+		),
+		outBytes: NewDesc(
+			ns+"process_network_transmit_bytes_total",
+			"Number of bytes sent by the process over the network.",
+			nil, nil,
+		),
 	}
 
 	if opts.PidFn == nil {
-		pid := os.Getpid()
-		c.pidFn = func() (int, error) { return pid, nil }
+		c.pidFn = getPIDFn()
 	} else {
 		c.pidFn = opts.PidFn
 	}
@@ -113,24 +123,23 @@
 	// Set up process metric collection if supported by the runtime.
 	if canCollectProcess() {
 		c.collectFn = c.processCollect
+		c.describeFn = c.describe
 	} else {
-		c.collectFn = func(ch chan<- Metric) {
-			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
-		}
+		c.collectFn = c.errorCollectFn
+		c.describeFn = c.errorDescribeFn
 	}
 
 	return c
 }
 
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
-	ch <- c.cpuTotal
-	ch <- c.openFDs
-	ch <- c.maxFDs
-	ch <- c.vsize
-	ch <- c.maxVsize
-	ch <- c.rss
-	ch <- c.startTime
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+	c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+	if c.reportErrors {
+		ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+	}
 }
 
 // Collect returns the current state of all metrics of the collector.
@@ -138,6 +147,11 @@
 	c.collectFn(ch)
 }
 
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	c.describeFn(ch)
+}
+
 func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
 	if !c.reportErrors {
 		return
@@ -152,13 +166,13 @@
 // It is meant to be used for the PidFn field in ProcessCollectorOpts.
 func NewPidFileFn(pidFilePath string) func() (int, error) {
 	return func() (int, error) {
-		content, err := ioutil.ReadFile(pidFilePath)
+		content, err := os.ReadFile(pidFilePath)
 		if err != nil {
-			return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
+			return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
 		}
 		pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
 		if err != nil {
-			return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
+			return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
 		}
 
 		return pid, nil
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
new file mode 100644
index 0000000..b32c95f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -0,0 +1,130 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// errNotImplemented is returned by stub functions that replace cgo functions, when cgo
+// isn't available.
+var errNotImplemented = errors.New("not implemented")
+
+type memoryInfo struct {
+	vsize uint64 // Virtual memory size in bytes
+	rss   uint64 // Resident memory size in bytes
+}
+
+func canCollectProcess() bool {
+	return true
+}
+
+func getSoftLimit(which int) (uint64, error) {
+	rlimit := syscall.Rlimit{}
+
+	if err := syscall.Getrlimit(which, &rlimit); err != nil {
+		return 0, err
+	}
+
+	return rlimit.Cur, nil
+}
+
+func getOpenFileCount() (float64, error) {
+	// Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
+	// return a list of open fds, but that requires a way to call C APIs.  The
+	// benefits, however, include fewer system calls and not failing when at the
+	// open file soft limit.
+
+	if dir, err := os.Open("/dev/fd"); err != nil {
+		return 0.0, err
+	} else {
+		defer dir.Close()
+
+		// Avoid ReadDir(), as it calls stat(2) on each descriptor.  Not only is
+		// that info not used, but KQUEUE descriptors fail stat(2), which causes
+		// the whole method to fail.
+		if names, err := dir.Readdirnames(0); err != nil {
+			return 0.0, err
+		} else {
+			// Subtract 1 to ignore the open /dev/fd descriptor above.
+			return float64(len(names) - 1), nil
+		}
+	}
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
+		if len(procs) == 1 {
+			startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, c.startTime, err)
+	}
+
+	// The proc structure returned by kern.proc.pid above has an Rusage member,
+	// but it is not filled in, so it needs to be fetched by getrusage(2).  For
+	// that call, the UTime, STime, and Maxrss members are filled out, but not
+	// Ixrss, Idrss, or Isrss for the memory usage.  Memory stats will require
+	// access to the C API to call task_info(TASK_BASIC_INFO).
+	rusage := unix.Rusage{}
+
+	if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+		cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+	} else {
+		c.reportError(ch, c.cpuTotal, err)
+	}
+
+	if memInfo, err := getMemory(); err == nil {
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+	} else if !errors.Is(err, errNotImplemented) {
+		// Don't report an error when support is not compiled in.
+		c.reportError(ch, c.rss, err)
+		c.reportError(ch, c.vsize, err)
+	}
+
+	if fds, err := getOpenFileCount(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+	} else {
+		c.reportError(ch, c.maxFDs, err)
+	}
+
+	if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+	} else {
+		c.reportError(ch, c.maxVsize, err)
+	}
+
+	// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+	//  be able to get the per-process network send/receive counts.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
new file mode 100644
index 0000000..d00a243
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
@@ -0,0 +1,84 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead.  But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT      0x90000000U
+#define SHARED_DATA_REGION_SIZE         0x10000000
+#define SHARED_TEXT_REGION_SIZE         0x10000000
+
+
+int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
+{
+    // This is lightly adapted from how ps(1) obtains its memory info.
+    // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
+
+    kern_return_t               error;
+    task_t                      task = MACH_PORT_NULL;
+    mach_task_basic_info_data_t info;
+    mach_msg_type_number_t      info_count = MACH_TASK_BASIC_INFO_COUNT;
+
+    error = task_info(
+                mach_task_self(),
+                MACH_TASK_BASIC_INFO,
+                (task_info_t) &info,
+                &info_count );
+
+    if( error != KERN_SUCCESS )
+    {
+        return error;
+    }
+
+    *rss   = info.resident_size;
+    *vsize = info.virtual_size;
+
+    {
+        vm_region_basic_info_data_64_t    b_info;
+        mach_vm_address_t                 address = GLOBAL_SHARED_TEXT_SEGMENT;
+        mach_vm_size_t                    size;
+        mach_port_t                       object_name;
+
+        /*
+         * try to determine if this task has the split libraries
+         * mapped in... if so, adjust its virtual size down by
+         * the 2 segments that are used for split libraries
+         */
+        info_count = VM_REGION_BASIC_INFO_COUNT_64;
+
+        error = mach_vm_region(
+                    mach_task_self(),
+                    &address,
+                    &size,
+                    VM_REGION_BASIC_INFO_64,
+                    (vm_region_info_t) &b_info,
+                    &info_count,
+                    &object_name);
+
+        if (error == KERN_SUCCESS) {
+            if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
+                *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
+                    *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
+            }
+        }
+    }
+
+    return 0;
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
new file mode 100644
index 0000000..9ac53f9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+package prometheus
+
+/*
+int get_memory_info(unsigned long long *rss, unsigned long long *vs);
+*/
+import "C"
+import "fmt"
+
+func getMemory() (*memoryInfo, error) {
+	var rss, vsize C.ulonglong
+
+	if err := C.get_memory_info(&rss, &vsize); err != 0 {
+		return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
+	}
+
+	return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+	ch <- c.rss
+	ch <- c.vsize
+
+	/* these metrics could be collected for the process, but are not implemented yet:
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
new file mode 100644
index 0000000..3788651
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && !cgo
+
+package prometheus
+
+func getMemory() (*memoryInfo, error) {
+	return nil, errNotImplemented
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+
+	/* these metrics could be collected for the process, but are not implemented yet:
+	ch <- c.rss
+	ch <- c.vsize
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
new file mode 100644
index 0000000..7732b7f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1 || js || ios
+// +build wasip1 js ios
+
+package prometheus
+
+func canCollectProcess() bool {
+	return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	c.errorCollectFn(ch)
+}
+
+// describe returns all descriptions of the collector for wasip1, js and ios.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	c.errorDescribeFn(ch)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
similarity index 64%
rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
index 3117461..8074f70 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -11,7 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !windows
+//go:build !windows && !js && !wasip1 && !darwin
+// +build !windows,!js,!wasip1,!darwin
 
 package prometheus
 
@@ -62,4 +63,34 @@
 	} else {
 		c.reportError(ch, nil, err)
 	}
+
+	if netstat, err := p.Netstat(); err == nil {
+		var inOctets, outOctets float64
+		if netstat.InOctets != nil {
+			inOctets = *netstat.InOctets
+		}
+		if netstat.OutOctets != nil {
+			outOctets = *netstat.OutOctets
+		}
+		ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+		ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}
+
+// describe returns all descriptions of the collector for platforms other than windows, js, wasip1 and darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.maxVsize
+	ch <- c.rss
+	ch <- c.startTime
+	ch <- c.inBytes
+	ch <- c.outBytes
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
index f973398..fa47428 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -79,14 +79,10 @@
 }
 
 func (c *processCollector) processCollect(ch chan<- Metric) {
-	h, err := windows.GetCurrentProcess()
-	if err != nil {
-		c.reportError(ch, nil, err)
-		return
-	}
+	h := windows.CurrentProcess()
 
 	var startTime, exitTime, kernelTime, userTime windows.Filetime
-	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+	err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
 	if err != nil {
 		c.reportError(ch, nil, err)
 		return
@@ -111,6 +107,19 @@
 	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
 }
 
+// describe returns all descriptions of the collector for windows.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.rss
+	ch <- c.startTime
+}
+
 func fileTimeToSeconds(ft windows.Filetime) float64 {
 	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index e7c0d05..315eab5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,16 +76,25 @@
 	return n, err
 }
 
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+	return r.ResponseWriter
+}
+
+type (
+	closeNotifierDelegator struct{ *responseWriterDelegator }
+	flusherDelegator       struct{ *responseWriterDelegator }
+	hijackerDelegator      struct{ *responseWriterDelegator }
+	readerFromDelegator    struct{ *responseWriterDelegator }
+	pusherDelegator        struct{ *responseWriterDelegator }
+)
 
 func (d closeNotifierDelegator) CloseNotify() <-chan bool {
 	//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
+
 func (d flusherDelegator) Flush() {
 	// If applicable, call WriteHeader here so that observeWriteHeader is
 	// handled appropriately.
@@ -94,9 +103,11 @@
 	}
 	d.ResponseWriter.(http.Flusher).Flush()
 }
+
 func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	return d.ResponseWriter.(http.Hijacker).Hijack()
 }
+
 func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	// If applicable, call WriteHeader here so that observeWriteHeader is
 	// handled appropriately.
@@ -107,6 +118,7 @@
 	d.written += n
 	return n, err
 }
+
 func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
 	return d.ResponseWriter.(http.Pusher).Push(target, opts)
 }
@@ -261,7 +273,7 @@
 			http.Flusher
 		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
 	}
-	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
 		return struct {
 			*responseWriterDelegator
 			http.Pusher
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index d86d0cf..763d99e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -33,24 +33,46 @@
 
 import (
 	"compress/gzip"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
-	"strings"
+	"strconv"
 	"sync"
 	"time"
 
 	"github.com/prometheus/common/expfmt"
 
+	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp/internal"
 )
 
 const (
-	contentTypeHeader     = "Content-Type"
-	contentEncodingHeader = "Content-Encoding"
-	acceptEncodingHeader  = "Accept-Encoding"
+	contentTypeHeader      = "Content-Type"
+	contentEncodingHeader  = "Content-Encoding"
+	acceptEncodingHeader   = "Accept-Encoding"
+	processStartTimeHeader = "Process-Start-Time-Unix"
 )
 
+// Compression represents the content encodings that handlers support for HTTP
+// responses.
+type Compression string
+
+const (
+	Identity Compression = "identity"
+	Gzip     Compression = "gzip"
+	Zstd     Compression = "zstd"
+)
+
+func defaultCompressionFormats() []Compression {
+	if internal.NewZstdWriter != nil {
+		return []Compression{Identity, Gzip, Zstd}
+	}
+
+	return []Compression{Identity, Gzip}
+}
+
 var gzipPool = sync.Pool{
 	New: func() interface{} {
 		return gzip.NewWriter(nil)
@@ -84,6 +106,13 @@
 // instrumentation. Use the InstrumentMetricHandler function to apply the same
 // kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
+}
+
+// HandlerForTransactional is like HandlerFor, but it uses a transactional gatherer, which
+// can safely change the returned *dto.MetricFamily in place before the call to `Gather` and
+// after the call to that `Gather`'s `done`.
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
 	var (
 		inFlightSem chan struct{}
 		errCnt      = prometheus.NewCounterVec(
@@ -103,7 +132,8 @@
 		errCnt.WithLabelValues("gathering")
 		errCnt.WithLabelValues("encoding")
 		if err := opts.Registry.Register(errCnt); err != nil {
-			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
 				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
 			} else {
 				panic(err)
@@ -111,7 +141,22 @@
 		}
 	}
 
+	// Select compression formats to offer based on default or user choice.
+	var compressions []string
+	if !opts.DisableCompression {
+		offers := defaultCompressionFormats()
+		if len(opts.OfferedCompressions) > 0 {
+			offers = opts.OfferedCompressions
+		}
+		for _, comp := range offers {
+			compressions = append(compressions, string(comp))
+		}
+	}
+
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		if !opts.ProcessStartTime.IsZero() {
+			rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+		}
 		if inFlightSem != nil {
 			select {
 			case inFlightSem <- struct{}{}: // All good, carry on.
@@ -123,7 +168,8 @@
 				return
 			}
 		}
-		mfs, err := reg.Gather()
+		mfs, done, err := reg.Gather()
+		defer done()
 		if err != nil {
 			if opts.ErrorLog != nil {
 				opts.ErrorLog.Println("error gathering metrics:", err)
@@ -150,22 +196,30 @@
 		} else {
 			contentType = expfmt.Negotiate(req.Header)
 		}
-		header := rsp.Header()
-		header.Set(contentTypeHeader, string(contentType))
+		rsp.Header().Set(contentTypeHeader, string(contentType))
 
-		w := io.Writer(rsp)
-		if !opts.DisableCompression && gzipAccepted(req.Header) {
-			header.Set(contentEncodingHeader, "gzip")
-			gz := gzipPool.Get().(*gzip.Writer)
-			defer gzipPool.Put(gz)
-
-			gz.Reset(w)
-			defer gz.Close()
-
-			w = gz
+		w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error getting writer", err)
+			}
+			w = io.Writer(rsp)
+			encodingHeader = string(Identity)
 		}
 
-		enc := expfmt.NewEncoder(w, contentType)
+		defer closeWriter()
+
+		// Set Content-Encoding only when data is compressed
+		if encodingHeader != string(Identity) {
+			rsp.Header().Set(contentEncodingHeader, encodingHeader)
+		}
+
+		var enc expfmt.Encoder
+		if opts.EnableOpenMetricsTextCreatedSamples {
+			enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
+		} else {
+			enc = expfmt.NewEncoder(w, contentType)
+		}
 
 		// handleError handles the error according to opts.ErrorHandling
 		// and returns true if we have to abort after the handling.
@@ -242,7 +296,8 @@
 	cnt.WithLabelValues("500")
 	cnt.WithLabelValues("503")
 	if err := reg.Register(cnt); err != nil {
-		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+		are := &prometheus.AlreadyRegisteredError{}
+		if errors.As(err, are) {
 			cnt = are.ExistingCollector.(*prometheus.CounterVec)
 		} else {
 			panic(err)
@@ -254,7 +309,8 @@
 		Help: "Current number of scrapes being served.",
 	})
 	if err := reg.Register(gge); err != nil {
-		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+		are := &prometheus.AlreadyRegisteredError{}
+		if errors.As(err, are) {
 			gge = are.ExistingCollector.(prometheus.Gauge)
 		} else {
 			panic(err)
@@ -326,9 +382,19 @@
 	// no effect on the HTTP status code because ErrorHandling is set to
 	// ContinueOnError.
 	Registry prometheus.Registerer
-	// If DisableCompression is true, the handler will never compress the
-	// response, even if requested by the client.
+	// DisableCompression disables the response encoding (compression) and
+	// encoding negotiation. If true, the handler will
+	// never compress the response, even if requested
+	// by the client and the OfferedCompressions field is set.
 	DisableCompression bool
+	// OfferedCompressions is a set of encodings (compressions) the handler will
+	// try to offer when negotiating with the client. This defaults to identity, gzip
+	// and zstd.
+	// NOTE: If the handler can't agree with the client on the encodings, or
+	// unsupported or empty encodings are set in OfferedCompressions, the
+	// handler always falls back to no compression (identity), for
+	// compatibility reasons. In such cases, ErrorLog will be used if set.
+	OfferedCompressions []Compression
 	// The number of concurrent HTTP requests is limited to
 	// MaxRequestsInFlight. Additional requests are responded to with 503
 	// Service Unavailable and a suitable message in the body. If
@@ -354,19 +420,29 @@
 	// (which changes the identity of the resulting series on the Prometheus
 	// server).
 	EnableOpenMetrics bool
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
-	a := header.Get(acceptEncodingHeader)
-	parts := strings.Split(a, ",")
-	for _, part := range parts {
-		part = strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return true
-		}
-	}
-	return false
+	// EnableOpenMetricsTextCreatedSamples specifies whether this handler should add extra, synthetic
+	// Created Timestamps for counters, histograms and summaries, which for the current
+	// version of OpenMetrics are defined as extra series with the same name and "_created"
+	// suffix. See also the OpenMetrics specification for more details
+	// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
+	//
+	// Created timestamps are used to improve the accuracy of reset detection,
+	// but as designed in OpenMetrics 1.0 they also dramatically increase cardinality
+	// if the scraper does not handle those metrics correctly (converting them to
+	// created timestamps instead of leaving those series as-is). New OpenMetrics
+	// versions might improve this situation.
+	//
+	// Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
+	// in version 2.50.0 to handle this situation.
+	EnableOpenMetricsTextCreatedSamples bool
+	// ProcessStartTime allows setting the process start time value that will be exposed
+	// with the "Process-Start-Time-Unix" response header along with the metrics
+	// payload. This allows callers to perform efficient transformations to cumulative
+	// counters (e.g. OpenTelemetry) or, more generally, _created timestamp estimation per
+	// scrape target.
+	// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+	// exposition format.
+	ProcessStartTime time.Time
 }
 
 // httpError removes any content-encoding header and then calls http.Error with
@@ -381,3 +457,36 @@
 		http.StatusInternalServerError,
 	)
 }
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// correct value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+	if len(compressions) == 0 {
+		return rw, string(Identity), func() {}, nil
+	}
+
+	// TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+	selected := httputil.NegotiateContentEncoding(r, compressions)
+
+	switch selected {
+	case "zstd":
+		if internal.NewZstdWriter == nil {
+			// The content encoding was not implemented yet.
+			return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
+		}
+		writer, closeWriter, err := internal.NewZstdWriter(rw)
+		return writer, selected, closeWriter, err
+	case "gzip":
+		gz := gzipPool.Get().(*gzip.Writer)
+		gz.Reset(rw)
+		return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+	case "identity":
+		// This means the content is not compressed.
+		return rw, selected, func() {}, nil
+	default:
+		// The content encoding was not implemented yet.
+		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
+	}
+}
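
Taken together, the new HandlerOpts fields above control compression negotiation, created-sample emission, and the Process-Start-Time-Unix header. A minimal sketch of how a caller might wire them up; the registry contents and listen address are illustrative assumptions.

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Offer only identity and gzip; zstd is left out even when available.
		OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
		// Emit synthetic _created series for OpenMetrics-aware scrapers.
		EnableOpenMetricsTextCreatedSamples: true,
		// Expose the process start time in the Process-Start-Time-Unix header.
		ProcessStartTime: time.Now(), // normally the real process start time
	})

	http.Handle("/metrics", handler)
	http.ListenAndServe(":8080", nil) // illustrative address
}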
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 861b4d2..d3482c4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -38,11 +38,11 @@
 //
 // See the example for ExampleInstrumentRoundTripperDuration for example usage.
 func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		gauge.Inc()
 		defer gauge.Dec()
 		return next.RoundTrip(r)
-	})
+	}
 }
 
 // InstrumentRoundTripperCounter is a middleware that wraps the provided
@@ -59,22 +59,29 @@
 // If the wrapped RoundTripper panics or returns a non-nil error, the Counter
 // is not incremented.
 //
+// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
+//
 // See the example for ExampleInstrumentRoundTripperDuration for example usage.
 func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
-	rtOpts := &option{}
+	rtOpts := defaultOptions()
 	for _, o := range opts {
-		o(rtOpts)
+		o.apply(rtOpts)
 	}
 
-	code, method := checkLabels(counter)
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
 
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
 		}
 		return resp, err
-	})
+	}
 }
 
 // InstrumentRoundTripperDuration is a middleware that wraps the provided
@@ -94,24 +101,31 @@
 // If the wrapped RoundTripper panics or returns a non-nil error, no values are
 // reported.
 //
+// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
+//
 // Note that this method is only guaranteed to never observe negative durations
 // if used with Go1.9+.
 func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
-	rtOpts := &option{}
+	rtOpts := defaultOptions()
 	for _, o := range opts {
-		o(rtOpts)
+		o.apply(rtOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
 
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
 		}
 		return resp, err
-	})
+	}
 }
 
 // InstrumentTrace is used to offer flexibility in instrumenting the available
@@ -149,7 +163,7 @@
 //
 // See the example for ExampleInstrumentRoundTripperDuration for example usage.
 func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		start := time.Now()
 
 		trace := &httptrace.ClientTrace{
@@ -231,5 +245,5 @@
 		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
 
 		return next.RoundTrip(r)
-	})
+	}
 }
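
The round-tripper middlewares above now accept functional Options, including exemplars resolved from the request context. A minimal client-side sketch; the metric names and the trace-ID context key are illustrative assumptions.

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type traceIDKey struct{}

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{Name: "client_in_flight_requests", Help: "In-flight requests."})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "client_api_requests_total", Help: "API requests by code and method."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(inFlight, counter)

	// Resolve an exemplar from the request context; returning nil keeps the
	// middleware on the plain Inc path.
	exemplarFromCtx := func(ctx context.Context) prometheus.Labels {
		if id, ok := ctx.Value(traceIDKey{}).(string); ok {
			return prometheus.Labels{"trace_id": id}
		}
		return nil
	}

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport,
				promhttp.WithExemplarFromContext(exemplarFromCtx))),
	}
	_ = client
}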
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index a23f0ed..9332b02 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -28,6 +28,26 @@
 // magicString is used for the hacky label test in checkLabels. Remove once fixed.
 const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
 
+// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
+	if labels == nil {
+		obs.Observe(val)
+		return
+	}
+	obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
+}
+
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
+	if labels == nil {
+		obs.Add(val)
+		return
+	}
+	obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
+}
+
 // InstrumentHandlerInFlight is a middleware that wraps the provided
 // http.Handler. It sets the provided prometheus.Gauge to the number of
 // requests currently handled by the wrapped http.Handler.
@@ -48,7 +68,7 @@
 // names are "code" and "method". The function panics otherwise. For the "method"
 // label a predefined default label value set is used to filter given values.
 // Values besides predefined values will count as `unknown` method.
-//`WithExtraMethods` can be used to add more methods to the set. The Observe
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
 // method of the Observer in the ObserverVec is called with the request duration
 // in seconds. Partitioning happens by HTTP status code and/or HTTP method if
 // the respective instance label names are present in the ObserverVec. For
@@ -62,28 +82,37 @@
 // Note that this method is only guaranteed to never observe negative durations
 // if used with Go1.9+.
 func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
-	mwOpts := &option{}
+	hOpts := defaultOptions()
 	for _, o := range opts {
-		o(mwOpts)
+		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	if code {
-		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		return func(w http.ResponseWriter, r *http.Request) {
 			now := time.Now()
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
-		})
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+		}
 	}
 
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		next.ServeHTTP(w, r)
-		obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
-	})
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+	}
 }
 
 // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
@@ -104,25 +133,36 @@
 //
 // See the example for InstrumentHandlerDuration for example usage.
 func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
-	mwOpts := &option{}
+	hOpts := defaultOptions()
 	for _, o := range opts {
-		o(mwOpts)
+		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(counter)
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	if code {
-		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		return func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
-			counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
-		})
+
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+		}
 	}
 
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
-		counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
-	})
+
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+	}
 }
 
 // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
@@ -148,20 +188,25 @@
 //
 // See the example for InstrumentHandlerDuration for example usage.
 func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
-	mwOpts := &option{}
+	hOpts := defaultOptions()
 	for _, o := range opts {
-		o(mwOpts)
+		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		d := newDelegator(w, func(status int) {
-			obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
+			l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 		})
 		next.ServeHTTP(d, r)
-	})
+	}
 }
 
 // InstrumentHandlerRequestSize is a middleware that wraps the provided
@@ -184,27 +229,38 @@
 //
 // See the example for InstrumentHandlerDuration for example usage.
 func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
-	mwOpts := &option{}
+	hOpts := defaultOptions()
 	for _, o := range opts {
-		o(mwOpts)
+		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	if code {
-		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		return func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 			size := computeApproximateRequestSize(r)
-			obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
-		})
+
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
+		}
 	}
 
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
 		size := computeApproximateRequestSize(r)
-		obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
-	})
+
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
+	}
 }
 
 // InstrumentHandlerResponseSize is a middleware that wraps the provided
@@ -227,17 +283,23 @@
 //
 // See the example for InstrumentHandlerDuration for example usage.
 func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
-	mwOpts := &option{}
+	hOpts := defaultOptions()
 	for _, o := range opts {
-		o(mwOpts)
+		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		d := newDelegator(w, nil)
 		next.ServeHTTP(d, r)
-		obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
+
+		l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
 	})
 }
 
@@ -246,7 +308,7 @@
 // Collector does not have a Desc or has more than one Desc or its Desc is
 // invalid. It also panics if the Collector has any non-const, non-curried
 // labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
+func checkLabels(c prometheus.Collector) (code, method bool) {
 	// TODO(beorn7): Remove this hacky way to check for instance labels
 	// once Descriptors can have their dimensionality queried.
 	var (
@@ -327,16 +389,13 @@
 	return true
 }
 
-// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
-// unnecessary allocations on each request.
-var emptyLabels = prometheus.Labels{}
-
 func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
-	if !(code || method) {
-		return emptyLabels
-	}
 	labels := prometheus.Labels{}
 
+	if !code && !method {
+		return labels
+	}
+
 	if code {
 		labels["code"] = sanitizeCode(status)
 	}
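
The handler middlewares above get the same treatment: dynamic labels resolved from the request context are curried away before the usual code/method label check. A minimal server-side sketch; the "tenant" label, its context key, and the listen address are illustrative assumptions.

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKey struct{}

func main() {
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "api_request_duration_seconds", Help: "Request duration.", Buckets: prometheus.DefBuckets},
		[]string{"code", "method", "tenant"},
	)
	prometheus.MustRegister(duration)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })

	// The "tenant" label is resolved per request from the context; upstream
	// middleware would normally populate the context value.
	handler := promhttp.InstrumentHandlerDuration(duration, api,
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if t, ok := ctx.Value(tenantKey{}).(string); ok {
				return t
			}
			return "unknown"
		}),
	)

	http.Handle("/api", handler)
	http.ListenAndServe(":8080", nil) // illustrative address
}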
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
new file mode 100644
index 0000000..c503959
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
@@ -0,0 +1,21 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"io"
+)
+
+// NewZstdWriter enables zstd write support if non-nil.
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index 35e41bd..5d4383a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -13,19 +13,72 @@
 
 package promhttp
 
-// Option are used to configure a middleware or round tripper..
-type Option func(*option)
+import (
+	"context"
 
-type option struct {
-	extraMethods []string
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Option is used to configure either a handler (middleware) or a round tripper.
+type Option interface {
+	apply(*options)
 }
 
+// LabelValueFromCtx is used to compute a label value from the request context.
+// The context can be filled with values from the request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
+// options stores options for either a handler or a round tripper.
+type options struct {
+	extraMethods       []string
+	getExemplarFn      func(requestCtx context.Context) prometheus.Labels
+	extraLabelsFromCtx map[string]LabelValueFromCtx
+}
+
+func defaultOptions() *options {
+	return &options{
+		getExemplarFn:      func(ctx context.Context) prometheus.Labels { return nil },
+		extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+	}
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+	labels := prometheus.Labels{}
+
+	for label := range o.extraLabelsFromCtx {
+		labels[label] = ""
+	}
+
+	return labels
+}
+
+type optionApplyFunc func(*options)
+
+func (o optionApplyFunc) apply(opt *options) { o(opt) }
+
 // WithExtraMethods adds additional HTTP methods to the list of allowed methods.
 // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
 //
 // See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
 func WithExtraMethods(methods ...string) Option {
-	return func(o *option) {
+	return optionApplyFunc(func(o *options) {
 		o.extraMethods = methods
-	}
+	})
+}
+
+// WithExemplarFromContext allows injecting a function that extracts an exemplar from the request
+// context; the exemplar is attached to counter and histogram metrics. If the function returns nil
+// labels or the metric does not support exemplars, no exemplar is added (noop), but the metric still observes/increments.
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
+	return optionApplyFunc(func(o *options) {
+		o.getExemplarFn = getExemplarFn
+	})
+}
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage.
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+	return optionApplyFunc(func(o *options) {
+		o.extraLabelsFromCtx[name] = valueFn
+	})
 }
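
Options now compose through the Option interface, so several can be combined on a single middleware. A minimal sketch mixing WithExtraMethods and WithExemplarFromContext; the metric name, the PURGE method, and the trace-ID context key are illustrative assumptions.

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type traceKey struct{}

func main() {
	hits := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "proxy_requests_total", Help: "Proxied requests."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(hits)

	proxy := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })

	handler := promhttp.InstrumentHandlerCounter(hits, proxy,
		// Count PURGE as a known method instead of folding it into "unknown".
		promhttp.WithExtraMethods("PURGE"),
		// Attach a trace-ID exemplar when one is present in the request context.
		promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
			if id, ok := ctx.Value(traceKey{}).(string); ok {
				return prometheus.Labels{"trace_id": id}
			}
			return nil
		}),
	)

	http.Handle("/", handler)
	http.ListenAndServe(":8080", nil) // illustrative address
}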
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 383a7f5..c6fd2f5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,24 +15,23 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"unicode/utf8"
 
-	"github.com/cespare/xxhash/v2"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/expfmt"
-
-	dto "github.com/prometheus/client_model/go"
-
 	"github.com/prometheus/client_golang/prometheus/internal"
+
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"google.golang.org/protobuf/proto"
 )
 
 const (
@@ -252,9 +251,12 @@
 }
 
 // Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
 type Registry struct {
 	mtx                   sync.RWMutex
 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
@@ -289,7 +291,7 @@
 
 		// Is the descriptor valid at all?
 		if desc.err != nil {
-			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+			return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
 		}
 
 		// Is the descID unique?
@@ -312,16 +314,17 @@
 			if dimHash != desc.dimHash {
 				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
 			}
-		} else {
-			// ...then check the new descriptors already seen.
-			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
-				if dimHash != desc.dimHash {
-					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
-				}
-			} else {
-				newDimHashesByName[desc.fqName] = desc.dimHash
-			}
+			continue
 		}
+
+		// ...then check the new descriptors already seen.
+		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+			}
+			continue
+		}
+		newDimHashesByName[desc.fqName] = desc.dimHash
 	}
 	// A Collector yielding no Desc at all is considered unchecked.
 	if len(newDescIDs) == 0 {
@@ -407,6 +410,14 @@
 
 // Gather implements Gatherer.
 func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+	r.mtx.RLock()
+
+	if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+		// Fast path.
+		r.mtx.RUnlock()
+		return nil, nil
+	}
+
 	var (
 		checkedMetricChan   = make(chan Metric, capMetricChan)
 		uncheckedMetricChan = make(chan Metric, capMetricChan)
@@ -416,7 +427,6 @@
 		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
 	)
 
-	r.mtx.RLock()
 	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
 	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
 	checkedCollectors := make(chan Collector, len(r.collectorsByID))
@@ -539,7 +549,7 @@
 			goroutineBudget--
 			runtime.Gosched()
 		}
-		// Once both checkedMetricChan and uncheckdMetricChan are closed
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
 		// and drained, the contraption above will nil out cmc and umc,
 		// and then we can leave the collect loop here.
 		if cmc == nil && umc == nil {
@@ -549,6 +559,31 @@
 	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
 
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	// Only report the checked Collectors; unchecked collectors don't report any
+	// Desc.
+	for _, c := range r.collectorsByID {
+		c.Describe(ch)
+	}
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	for _, c := range r.collectorsByID {
+		c.Collect(ch)
+	}
+	for _, c := range r.uncheckedCollectors {
+		c.Collect(ch)
+	}
+}
+
 // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
 // Prometheus text format, and writes it to a temporary file. Upon success, the
 // temporary file is renamed to the provided filename.
@@ -556,7 +591,7 @@
 // This is intended for use with the textfile collector of the node exporter.
 // Note that the node exporter expects the filename to be suffixed with ".prom".
 func WriteToTextfile(filename string, g Gatherer) error {
-	tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+	tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
 	if err != nil {
 		return err
 	}
@@ -575,7 +610,7 @@
 		return err
 	}
 
-	if err := os.Chmod(tmp.Name(), 0644); err != nil {
+	if err := os.Chmod(tmp.Name(), 0o644); err != nil {
 		return err
 	}
 	return os.Rename(tmp.Name(), filename)
@@ -596,7 +631,7 @@
 	}
 	dtoMetric := &dto.Metric{}
 	if err := metric.Write(dtoMetric); err != nil {
-		return fmt.Errorf("error collecting metric %v: %s", desc, err)
+		return fmt.Errorf("error collecting metric %v: %w", desc, err)
 	}
 	metricFamily, ok := metricFamiliesByName[desc.fqName]
 	if ok { // Existing name.
@@ -718,12 +753,13 @@
 	for i, g := range gs {
 		mfs, err := g.Gather()
 		if err != nil {
-			if multiErr, ok := err.(MultiError); ok {
+			multiErr := MultiError{}
+			if errors.As(err, &multiErr) {
 				for _, err := range multiErr {
-					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
 				}
 			} else {
-				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
 			}
 		}
 		for _, mf := range mfs {
@@ -884,11 +920,11 @@
 	h.Write(separatorByteSlice)
 	// Make sure label pairs are sorted. We depend on it for the consistency
 	// check.
-	if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+	if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
 		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
 		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
 		copy(copiedLabels, dtoMetric.Label)
-		sort.Sort(labelPairSorter(copiedLabels))
+		sort.Sort(internal.LabelPairSorter(copiedLabels))
 		dtoMetric.Label = copiedLabels
 	}
 	for _, lp := range dtoMetric.Label {
@@ -897,6 +933,10 @@
 		h.WriteString(lp.GetValue())
 		h.Write(separatorByteSlice)
 	}
+	if dtoMetric.TimestampMs != nil {
+		h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+		h.Write(separatorByteSlice)
+	}
 	hSum := h.Sum64()
 	if _, exists := metricHashes[hSum]; exists {
 		return fmt.Errorf(
@@ -924,7 +964,7 @@
 	// Is the desc consistent with the content of the metric?
 	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
 	copy(lpsFromDesc, desc.constLabelPairs)
-	for _, l := range desc.variableLabels {
+	for _, l := range desc.variableLabels.names {
 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
 			Name: proto.String(l),
 		})
@@ -935,7 +975,7 @@
 			metricFamily.GetName(), dtoMetric, desc,
 		)
 	}
-	sort.Sort(labelPairSorter(lpsFromDesc))
+	sort.Sort(internal.LabelPairSorter(lpsFromDesc))
 	for i, lpFromDesc := range lpsFromDesc {
 		lpFromMetric := dtoMetric.Label[i]
 		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
@@ -948,3 +988,89 @@
 	}
 	return nil
 }
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure the registries have mutually exclusive metric families;
+// no deduplication will happen.
+type MultiTRegistry struct {
+	tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+	return &MultiTRegistry{
+		tGatherers: tGatherers,
+	}
+}
+
+// Gather implements TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+	errs := MultiError{}
+
+	dFns := make([]func(), 0, len(r.tGatherers))
+	// TODO(bwplotka): Implement concurrency for those?
+	for _, g := range r.tGatherers {
+		// TODO(bwplotka): Check for duplicates?
+		m, d, err := g.Gather()
+		errs.Append(err)
+
+		mfs = append(mfs, m...)
+		dFns = append(dFns, d)
+	}
+
+	// TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+	sort.Slice(mfs, func(i, j int) bool {
+		return *mfs[i].Name < *mfs[j].Name
+	})
+	return mfs, func() {
+		for _, d := range dFns {
+			d()
+		}
+	}, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a transactional gatherer that can be notified when the memory
+// used by the gathered metric families is no longer needed by the caller. This allows caching implementations.
+type TransactionalGatherer interface {
+	// Gather returns metrics in a lexicographically sorted slice
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
+	//
+	// Important: done is expected to be called (even if an error occurs!)
+	// once the caller no longer needs the returned slice of dto.MetricFamily.
+	Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a transactional one with a noop done function.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+	return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+	g Gatherer
+}
+
+// Gather implements TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+	mfs, err := g.g.Gather()
+	return mfs, func() {}, err
+}
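
The transactional-gatherer additions above fit together as follows: independent registries are wrapped by ToTransactionalGatherer, joined by MultiTRegistry, and exposed via promhttp.HandlerForTransactional. A minimal sketch; the metric names and listen address are illustrative assumptions.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	appReg := prometheus.NewRegistry()
	jobReg := prometheus.NewRegistry()
	// The caller must keep the metric families of the two registries disjoint;
	// MultiTRegistry performs no deduplication.
	appReg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{Name: "app_events_total", Help: "App events."}))
	jobReg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{Name: "job_last_success_timestamp_seconds", Help: "Last success."}))

	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(appReg),
		prometheus.ToTransactionalGatherer(jobReg),
	)

	http.Handle("/metrics", promhttp.HandlerForTransactional(multi, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil) // illustrative address
}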
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index c5fa8ed..ac5203c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -22,11 +22,11 @@
 	"sync/atomic"
 	"time"
 
-	"github.com/beorn7/perks/quantile"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/beorn7/perks/quantile"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // quantileLabel is used for the label that defines the quantile in a
@@ -146,6 +146,21 @@
 	// is the internal buffer size of the underlying package
 	// "github.com/bmizerany/perks/quantile").
 	BufCap uint32
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+	SummaryOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
 }
 
 // Problem with the sliding-window decay algorithm... The Merge method of
@@ -177,11 +192,11 @@
 }
 
 func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
-	if len(desc.variableLabels) != len(labelValues) {
-		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	if len(desc.variableLabels.names) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
 	}
 
-	for _, n := range desc.variableLabels {
+	for _, n := range desc.variableLabels.names {
 		if n == quantileLabel {
 			panic(errQuantileLabelNotAllowed)
 		}
@@ -211,6 +226,9 @@
 		opts.BufCap = DefBufCap
 	}
 
+	if opts.now == nil {
+		opts.now = time.Now
+	}
 	if len(opts.Objectives) == 0 {
 		// Use the lock-free implementation of a Summary without objectives.
 		s := &noObjectivesSummary{
@@ -219,11 +237,13 @@
 			counts:     [2]*summaryCounts{{}, {}},
 		}
 		s.init(s) // Init self-collection.
+		s.createdTs = timestamppb.New(opts.now())
 		return s
 	}
 
 	s := &summary{
 		desc: desc,
+		now:  opts.now,
 
 		objectives:       opts.Objectives,
 		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@@ -234,7 +254,7 @@
 		coldBuf:        make([]float64, 0, opts.BufCap),
 		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
 	}
-	s.headStreamExpTime = time.Now().Add(s.streamDuration)
+	s.headStreamExpTime = opts.now().Add(s.streamDuration)
 	s.hotBufExpTime = s.headStreamExpTime
 
 	for i := uint32(0); i < opts.AgeBuckets; i++ {
@@ -248,6 +268,7 @@
 	sort.Float64s(s.sortedObjectives)
 
 	s.init(s) // Init self-collection.
+	s.createdTs = timestamppb.New(opts.now())
 	return s
 }
 
@@ -260,6 +281,8 @@
 
 	desc *Desc
 
+	now func() time.Time
+
 	objectives       map[float64]float64
 	sortedObjectives []float64
 
@@ -275,6 +298,8 @@
 	headStream                       *quantile.Stream
 	headStreamIdx                    int
 	headStreamExpTime, hotBufExpTime time.Time
+
+	createdTs *timestamppb.Timestamp
 }
 
 func (s *summary) Desc() *Desc {
@@ -285,7 +310,7 @@
 	s.bufMtx.Lock()
 	defer s.bufMtx.Unlock()
 
-	now := time.Now()
+	now := s.now()
 	if now.After(s.hotBufExpTime) {
 		s.asyncFlush(now)
 	}
@@ -296,13 +321,15 @@
 }
 
 func (s *summary) Write(out *dto.Metric) error {
-	sum := &dto.Summary{}
+	sum := &dto.Summary{
+		CreatedTimestamp: s.createdTs,
+	}
 	qs := make([]*dto.Quantile, 0, len(s.objectives))
 
 	s.bufMtx.Lock()
 	s.mtx.Lock()
 	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
-	s.swapBufs(time.Now())
+	s.swapBufs(s.now())
 	s.bufMtx.Unlock()
 
 	s.flushColdBuf()
@@ -429,6 +456,8 @@
 	counts [2]*summaryCounts
 
 	labelPairs []*dto.LabelPair
+
+	createdTs *timestamppb.Timestamp
 }
 
 func (s *noObjectivesSummary) Desc() *Desc {
@@ -479,8 +508,9 @@
 	}
 
 	sum := &dto.Summary{
-		SampleCount: proto.Uint64(count),
-		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		SampleCount:      proto.Uint64(count),
+		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		CreatedTimestamp: s.createdTs,
 	}
 
 	out.Summary = sum
@@ -530,20 +560,28 @@
 // it is handled by the Prometheus server internally, “quantile” is an illegal
 // label name. NewSummaryVec will panic if this label name is used.
 func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
-	for _, ln := range labelNames {
+	return V2.NewSummaryVec(SummaryVecOpts{
+		SummaryOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+	for _, ln := range opts.VariableLabels.labelNames() {
 		if ln == quantileLabel {
 			panic(errQuantileLabelNotAllowed)
 		}
 	}
-	desc := NewDesc(
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &SummaryVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			return newSummary(desc, opts, lvs...)
+			return newSummary(desc, opts.SummaryOpts, lvs...)
 		}),
 	}
 }
@@ -603,7 +641,8 @@
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
 	s, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -614,7 +653,8 @@
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (v *SummaryVec) With(labels Labels) Observer {
 	s, err := v.GetMetricWith(labels)
 	if err != nil {
@@ -660,6 +700,7 @@
 	sum        float64
 	quantiles  map[float64]float64
 	labelPairs []*dto.LabelPair
+	createdTs  *timestamppb.Timestamp
 }
 
 func (s *constSummary) Desc() *Desc {
@@ -667,7 +708,9 @@
 }
 
 func (s *constSummary) Write(out *dto.Metric) error {
-	sum := &dto.Summary{}
+	sum := &dto.Summary{
+		CreatedTimestamp: s.createdTs,
+	}
 	qs := make([]*dto.Quantile, 0, len(s.quantiles))
 
 	sum.SampleCount = proto.Uint64(s.count)
@@ -701,7 +744,8 @@
 //
 // quantiles maps ranks to quantile values. For example, a median latency of
 // 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
 //
 // NewConstSummary returns an error if the length of labelValues is not
 // consistent with the variable labels in Desc or if Desc is invalid.
@@ -715,7 +759,7 @@
 	if desc.err != nil {
 		return nil, desc.err
 	}
-	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
 		return nil, err
 	}
 	return &constSummary{
@@ -742,3 +786,45 @@
 	}
 	return m
 }
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
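
The new created-timestamp constructors above are meant for const summaries built inside custom collectors. A minimal sketch; the descriptor, counts, and quantile values are illustrative assumptions.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var latencyDesc = prometheus.NewDesc(
	"job_latency_seconds",
	"Latency summary mirrored from an external system.",
	[]string{"job"}, nil,
)

type mirrorCollector struct{ started time.Time }

func (c mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- latencyDesc }

func (c mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	// Quantiles map ranks to values, e.g. median 0.23s and 99th percentile 0.56s.
	ch <- prometheus.MustNewConstSummaryWithCreatedTimestamp(
		latencyDesc,
		4711,   // sample count
		403.34, // sample sum
		map[float64]float64{0.5: 0.23, 0.99: 0.56},
		c.started, // created timestamp
		"import",  // label value for "job"
	)
}

func main() {
	prometheus.MustRegister(mirrorCollector{started: time.Now()})
}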
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index 8d5f105..52344fe 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,13 +23,24 @@
 }
 
 // NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing an exemplar
+// later on will also be supported.
+// Timer is usually used to time a function call in the
 // following way:
-//    func TimeMe() {
-//        timer := NewTimer(myHistogram)
-//        defer timer.ObserveDuration()
-//        // Do actual work.
-//    }
+//
+//	func TimeMe() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDuration()
+//	    // Do actual work.
+//	}
+//
+// or
+//
+//	func TimeMeWithExemplar() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDurationWithExemplar(exemplar)
+//	    // Do actual work.
+//	}
 func NewTimer(o Observer) *Timer {
 	return &Timer{
 		begin:    time.Now(),
@@ -52,3 +63,19 @@
 	}
 	return d
 }
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration unless the exemplar is nil or the provided
+// Observer cannot be cast to an ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+	d := time.Since(t.begin)
+	eo, ok := t.observer.(ExemplarObserver)
+	if ok && exemplar != nil {
+		eo.ObserveWithExemplar(d.Seconds(), exemplar)
+		return d
+	}
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
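
ObserveDurationWithExemplar slots into the same defer pattern as ObserveDuration. A minimal sketch; the histogram name and trace-ID value are illustrative assumptions.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "handler_duration_seconds",
	Help:    "Time spent in the handler.",
	Buckets: prometheus.DefBuckets,
})

func handle(traceID string) {
	timer := prometheus.NewTimer(requestDuration)
	// If the exemplar is nil or the observer is not an ExemplarObserver,
	// this degrades to a plain Observe call.
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})

	// ... do actual work ...
}

func main() {
	prometheus.MustRegister(requestDuration)
	handle("abc123")
}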
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index c778711..cc23011 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,16 +14,17 @@
 package prometheus
 
 import (
+	"errors"
 	"fmt"
 	"sort"
 	"time"
 	"unicode/utf8"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
+	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // ValueType is an enumeration of metric types that represent a simple value.
@@ -38,6 +39,23 @@
 	UntypedValue
 )
 
+var (
+	CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+	GaugeMetricTypePtr   = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+	UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+	switch v {
+	case CounterValue:
+		return CounterMetricTypePtr
+	case GaugeValue:
+		return GaugeMetricTypePtr
+	default:
+		return UntypedMetricTypePtr
+	}
+}
+
 // valueFunc is a generic metric for simple values retrieved on collect time
 // from a function. It implements Metric and Collector. Its effective type is
 // determined by ValueType. This is a low-level building block used by the
@@ -74,7 +92,7 @@
 }
 
 func (v *valueFunc) Write(out *dto.Metric) error {
-	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
+	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
 }
 
 // NewConstMetric returns a metric with one fixed value that cannot be
@@ -88,14 +106,18 @@
 	if desc.err != nil {
 		return nil, desc.err
 	}
-	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
 		return nil, err
 	}
+
+	metric := &dto.Metric{}
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
+		return nil, err
+	}
+
 	return &constMetric{
-		desc:       desc,
-		valType:    valueType,
-		val:        value,
-		labelPairs: MakeLabelPairs(desc, labelValues),
+		desc:   desc,
+		metric: metric,
 	}, nil
 }
 
@@ -109,11 +131,46 @@
 	return m
 }
 
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	switch valueType {
+	case CounterValue:
+		break
+	default:
+		return nil, errors.New("created timestamps are only supported for counters")
+	}
+
+	metric := &dto.Metric{}
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+		return nil, err
+	}
+
+	return &constMetric{
+		desc:   desc,
+		metric: metric,
+	}, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+	m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 type constMetric struct {
-	desc       *Desc
-	valType    ValueType
-	val        float64
-	labelPairs []*dto.LabelPair
+	desc   *Desc
+	metric *dto.Metric
 }
 
 func (m *constMetric) Desc() *Desc {
@@ -121,7 +178,11 @@
 }
 
 func (m *constMetric) Write(out *dto.Metric) error {
-	return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
+	out.Label = m.metric.Label
+	out.Counter = m.metric.Counter
+	out.Gauge = m.metric.Gauge
+	out.Untyped = m.metric.Untyped
+	return nil
 }
 
 func populateMetric(
@@ -130,11 +191,12 @@
 	labelPairs []*dto.LabelPair,
 	e *dto.Exemplar,
 	m *dto.Metric,
+	ct *timestamppb.Timestamp,
 ) error {
 	m.Label = labelPairs
 	switch t {
 	case CounterValue:
-		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
+		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
 	case GaugeValue:
 		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
 	case UntypedValue:
@@ -153,29 +215,29 @@
 // This function is only needed for custom Metric implementations. See MetricVec
 // example.
 func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
-	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+	totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
 	if totalLen == 0 {
 		// Super fast path.
 		return nil
 	}
-	if len(desc.variableLabels) == 0 {
+	if len(desc.variableLabels.names) == 0 {
 		// Moderately fast path.
 		return desc.constLabelPairs
 	}
 	labelPairs := make([]*dto.LabelPair, 0, totalLen)
-	for i, n := range desc.variableLabels {
+	for i, l := range desc.variableLabels.names {
 		labelPairs = append(labelPairs, &dto.LabelPair{
-			Name:  proto.String(n),
+			Name:  proto.String(l),
 			Value: proto.String(labelValues[i]),
 		})
 	}
 	labelPairs = append(labelPairs, desc.constLabelPairs...)
-	sort.Sort(labelPairSorter(labelPairs))
+	sort.Sort(internal.LabelPairSorter(labelPairs))
 	return labelPairs
 }
 
 // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 64
+const ExemplarMaxRunes = 128
 
 // newExemplar creates a new dto.Exemplar from the provided values. An error is
 // returned if any of the label names or values are invalid or if the total
@@ -183,8 +245,8 @@
 func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
 	e := &dto.Exemplar{}
 	e.Value = proto.Float64(value)
-	tsProto, err := ptypes.TimestampProto(ts)
-	if err != nil {
+	tsProto := timestamppb.New(ts)
+	if err := tsProto.CheckValid(); err != nil {
 		return nil, err
 	}
 	e.Timestamp = tsProto
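Editor's note: the value.go hunk above introduces created-timestamp support for const counters. As a rough, non-normative sketch of how the new NewConstMetricWithCreatedTimestamp / MustNewConstMetricWithCreatedTimestamp API is meant to be called from a custom collector (the collector type, metric name, label value, and sample value below are illustrative, not part of this patch):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleCollector is a hypothetical custom collector. Only counters may carry
// an explicit created timestamp; other value types make the constructor return
// an error (and the Must variant panic).
type exampleCollector struct {
	desc  *prometheus.Desc
	start time.Time
}

func (c *exampleCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *exampleCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetricWithCreatedTimestamp(
		c.desc, prometheus.CounterValue, 1027, c.start, "200",
	)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(&exampleCollector{
		desc: prometheus.NewDesc("http_requests_total",
			"Total HTTP requests.", []string{"code"}, nil),
		start: time.Now(),
	})
}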
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 4ababe6..487b466 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -72,12 +72,14 @@
 // with a performance overhead (for creating and processing the Labels map).
 // See also the CounterVec example.
 func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
 	h, err := m.hashLabelValues(lvs)
 	if err != nil {
 		return false
 	}
 
-	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+	return m.deleteByHashWithLabelValues(h, lvs, m.curry)
 }
 
 // Delete deletes the metric where the variable labels are the same as those
@@ -91,12 +93,28 @@
 // This method is used for the same purpose as DeleteLabelValues(...string). See
 // there for pros and cons of the two methods.
 func (m *MetricVec) Delete(labels Labels) bool {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
 	h, err := m.hashLabels(labels)
 	if err != nil {
 		return false
 	}
 
-	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+	return m.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	return m.deleteByLabels(labels, m.curry)
 }
 
 // Without explicit forwarding of Describe, Collect, Reset, those methods won't
@@ -134,11 +152,11 @@
 		oldCurry = m.curry
 		iCurry   int
 	)
-	for i, label := range m.desc.variableLabels {
-		val, ok := labels[label]
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
 		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
 			if ok {
-				return nil, fmt.Errorf("label name %q is already curried", label)
+				return nil, fmt.Errorf("label name %q is already curried", labelName)
 			}
 			newCurry = append(newCurry, oldCurry[iCurry])
 			iCurry++
@@ -146,7 +164,10 @@
 			if !ok {
 				continue // Label stays uncurried.
 			}
-			newCurry = append(newCurry, curriedLabelValue{i, val})
+			newCurry = append(newCurry, curriedLabelValue{
+				i,
+				m.desc.variableLabels.constrain(labelName, val),
+			})
 		}
 	}
 	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@@ -189,12 +210,13 @@
 // a wrapper around MetricVec, implementing a vector for a specific Metric
 // implementation, for example GaugeVec.
 func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
 	h, err := m.hashLabelValues(lvs)
 	if err != nil {
 		return nil, err
 	}
 
-	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+	return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
 }
 
 // GetMetricWith returns the Metric for the given Labels map (the label names
@@ -214,16 +236,19 @@
 // around MetricVec, implementing a vector for a specific Metric implementation,
 // for example GaugeVec.
 func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
 	h, err := m.hashLabels(labels)
 	if err != nil {
 		return nil, err
 	}
 
-	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+	return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
 }
 
 func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
-	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
 		return 0, err
 	}
 
@@ -232,7 +257,7 @@
 		curry         = m.curry
 		iVals, iCurry int
 	)
-	for i := 0; i < len(m.desc.variableLabels); i++ {
+	for i := 0; i < len(m.desc.variableLabels.names); i++ {
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			h = m.hashAdd(h, curry[iCurry].value)
 			iCurry++
@@ -246,7 +271,7 @@
 }
 
 func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
-	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
 		return 0, err
 	}
 
@@ -255,17 +280,17 @@
 		curry  = m.curry
 		iCurry int
 	)
-	for i, label := range m.desc.variableLabels {
-		val, ok := labels[label]
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			if ok {
-				return 0, fmt.Errorf("label name %q is already curried", label)
+				return 0, fmt.Errorf("label name %q is already curried", labelName)
 			}
 			h = m.hashAdd(h, curry[iCurry].value)
 			iCurry++
 		} else {
 			if !ok {
-				return 0, fmt.Errorf("label name %q missing in label map", label)
+				return 0, fmt.Errorf("label name %q missing in label map", labelName)
 			}
 			h = m.hashAdd(h, val)
 		}
@@ -381,6 +406,82 @@
 	return true
 }
 
+// deleteByLabels deletes all metrics that contain the given labels and returns the number deleted.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	var numDeleted int
+
+	for h, metrics := range m.metrics {
+		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+		if i >= len(metrics) {
+			// Didn't find matching labels in this metric slice.
+			continue
+		}
+		delete(m.metrics, h)
+		numDeleted++
+	}
+
+	return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchPartialLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// the index or len(items), along with a boolean indicating whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+	for i, l := range items {
+		if l == target {
+			return i, true
+		}
+	}
+	return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+	for _, curriedValue := range curry {
+		if curriedValue.index == index {
+			// This label was curried. See if the curried value matches our target.
+			return curriedValue.value == targetValue, true
+		}
+	}
+	// This label was not curried. See if the current value matches our target label.
+	return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	for l, v := range labels {
+		// Check if the target label exists in our metrics and get the index.
+		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
+		if validLabel {
+			// Check the value of that label against the target value.
+			// We don't consider curried values in partial matches.
+			matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+			if matches && !curried {
+				continue
+			}
+		}
+		return false
+	}
+	return true
+}
+
 // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
 // or creates it and returns the new one.
 //
@@ -406,7 +507,7 @@
 	return metric
 }
 
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
 // or creates it and returns the new one.
 //
 // This function holds the mutex.
@@ -485,7 +586,7 @@
 	return len(metrics)
 }
 
-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
 	if len(values) != len(lvs)+len(curry) {
 		return false
 	}
@@ -511,7 +612,7 @@
 		return false
 	}
 	iCurry := 0
-	for i, k := range desc.variableLabels {
+	for i, k := range desc.variableLabels.names {
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			if values[i] != curry[iCurry].value {
 				return false
@@ -529,7 +630,7 @@
 func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
 	labelValues := make([]string, len(labels)+len(curry))
 	iCurry := 0
-	for i, k := range desc.variableLabels {
+	for i, k := range desc.variableLabels.names {
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			labelValues[i] = curry[iCurry].value
 			iCurry++
@@ -554,3 +655,55 @@
 	}
 	return labelValues
 }
+
+var labelsPool = &sync.Pool{
+	New: func() interface{} {
+		return make(Labels)
+	},
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+	if len(desc.variableLabels.labelConstraints) == 0 {
+		// Fast path when there are no constraints.
+		return labels, func() {}
+	}
+
+	constrainedLabels := labelsPool.Get().(Labels)
+	for l, v := range labels {
+		constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+	}
+
+	return constrainedLabels, func() {
+		for k := range constrainedLabels {
+			delete(constrainedLabels, k)
+		}
+		labelsPool.Put(constrainedLabels)
+	}
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+	if len(desc.variableLabels.labelConstraints) == 0 {
+		// Fast path when there are no constraints.
+		return lvs
+	}
+
+	constrainedValues := make([]string, len(lvs))
+	var iCurry, iLVs int
+	for i := 0; i < len(lvs)+len(curry); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			iCurry++
+			continue
+		}
+
+		if i < len(desc.variableLabels.names) {
+			constrainedValues[iLVs] = desc.variableLabels.constrain(
+				desc.variableLabels.names[i],
+				lvs[iLVs],
+			)
+		} else {
+			constrainedValues[iLVs] = lvs[iLVs]
+		}
+		iLVs++
+	}
+	return constrainedValues
+}
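Editor's note: a minimal usage sketch of the DeletePartialMatch method added in this hunk. The concrete vector types embed MetricVec, so the method is available on, for example, a CounterVec; the metric name and labels below are made up for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
	}, []string{"method", "code"})

	requests.WithLabelValues("GET", "200").Inc()
	requests.WithLabelValues("GET", "500").Inc()
	requests.WithLabelValues("POST", "200").Inc()

	// Every child whose labels contain method="GET" is removed, regardless of
	// label order; the return value is the number of deleted children.
	deleted := requests.DeletePartialMatch(prometheus.Labels{"method": "GET"})
	fmt.Println(deleted) // 2
}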
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 0000000..42bc3a8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access experimental API that might
+// be present in v2 of client golang someday. It offers extended functionality
+// of v1 with a slightly changed API. It is acceptable to use some pieces from v1,
+// e.g. `prometheus.NewGauge`, and some from v2, e.g. `prometheus.V2.NewDesc`,
+// in the same codebase.
+var V2 = v2{}
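Editor's note: a hedged sketch of what the V2 entry point looks like in use, tying it to the label-constraint plumbing added to vec.go above. V2.NewDesc and the ConstrainedLabels/ConstrainedLabel types come from the vendored package version; the metric name and the strings.ToUpper constraint are purely illustrative:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Unlike prometheus.NewDesc, V2.NewDesc accepts constrainable labels, so a
	// label can carry a normalisation function that vec.go applies on every
	// lookup (see constrainLabels/constrainLabelValues in the previous hunk).
	desc := prometheus.V2.NewDesc(
		"http_requests_total",
		"Total HTTP requests.",
		prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper},
		},
		nil,
	)
	_ = desc
}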
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 74ee932..2ed1285 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,10 +17,10 @@
 	"fmt"
 	"sort"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
 )
 
 // WrapRegistererWith returns a Registerer wrapping the provided
@@ -63,7 +63,7 @@
 // metric names that are standardized across applications, as that would break
 // horizontal monitoring, for example the metrics provided by the Go collector
 // (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
+// fact, those metrics are already prefixed with "go_" or "process_",
 // respectively.)
 //
 // Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@
 	}
 }
 
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector, effectively un-registering the
+// metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		labels:           labels,
+	}
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		prefix:           prefix,
+	}
+}
+
 type wrappingRegisterer struct {
 	wrappedRegisterer Registerer
 	prefix            string
@@ -182,7 +216,7 @@
 			Value: proto.String(lv),
 		})
 	}
-	sort.Sort(labelPairSorter(out.Label))
+	sort.Sort(internal.LabelPairSorter(out.Label))
 	return nil
 }
 
@@ -204,7 +238,7 @@
 		constLabels[ln] = lv
 	}
 	// NewDesc will do remaining validations.
-	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
 	// Propagate errors if there were any. This will override any error
 	// created by NewDesc above, i.e. earlier errors get precedence.
 	if desc.err != nil {
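Editor's note: a brief, non-authoritative sketch of the WrapCollectorWith helper documented above. The CounterVec here stands in for whatever a third-party library would register, and all names are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	ops := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "foo_ops_total",
		Help: "Operations performed by foo.",
	}, []string{"op"})

	// Every metric collected through the wrapper gains instance_name="a" as a
	// const label, and the wrapper can later be unregistered as a single unit.
	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance_name": "a"}, ops)
	reg.MustRegister(wrapped)

	ops.WithLabelValues("read").Inc()

	reg.Unregister(wrapped)
}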
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 2f4930d..2f15490 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,51 +1,75 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: metrics.proto
+// versions:
+// 	protoc-gen-go v1.30.0
+// 	protoc        v3.20.3
+// source: io/prometheus/client/metrics.proto
 
 package io_prometheus_client
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	timestamp "github.com/golang/protobuf/ptypes/timestamp"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
 
 type MetricType int32
 
 const (
-	MetricType_COUNTER   MetricType = 0
-	MetricType_GAUGE     MetricType = 1
-	MetricType_SUMMARY   MetricType = 2
-	MetricType_UNTYPED   MetricType = 3
+	// COUNTER must use the Metric field "counter".
+	MetricType_COUNTER MetricType = 0
+	// GAUGE must use the Metric field "gauge".
+	MetricType_GAUGE MetricType = 1
+	// SUMMARY must use the Metric field "summary".
+	MetricType_SUMMARY MetricType = 2
+	// UNTYPED must use the Metric field "untyped".
+	MetricType_UNTYPED MetricType = 3
+	// HISTOGRAM must use the Metric field "histogram".
 	MetricType_HISTOGRAM MetricType = 4
+	// GAUGE_HISTOGRAM must use the Metric field "histogram".
+	MetricType_GAUGE_HISTOGRAM MetricType = 5
 )
 
-var MetricType_name = map[int32]string{
-	0: "COUNTER",
-	1: "GAUGE",
-	2: "SUMMARY",
-	3: "UNTYPED",
-	4: "HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
-	"COUNTER":   0,
-	"GAUGE":     1,
-	"SUMMARY":   2,
-	"UNTYPED":   3,
-	"HISTOGRAM": 4,
-}
+// Enum value maps for MetricType.
+var (
+	MetricType_name = map[int32]string{
+		0: "COUNTER",
+		1: "GAUGE",
+		2: "SUMMARY",
+		3: "UNTYPED",
+		4: "HISTOGRAM",
+		5: "GAUGE_HISTOGRAM",
+	}
+	MetricType_value = map[string]int32{
+		"COUNTER":         0,
+		"GAUGE":           1,
+		"SUMMARY":         2,
+		"UNTYPED":         3,
+		"HISTOGRAM":       4,
+		"GAUGE_HISTOGRAM": 5,
+	}
+)
 
 func (x MetricType) Enum() *MetricType {
 	p := new(MetricType)
@@ -54,670 +78,1322 @@
 }
 
 func (x MetricType) String() string {
-	return proto.EnumName(MetricType_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
 
-func (x *MetricType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+	return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+	return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
 	if err != nil {
 		return err
 	}
-	*x = MetricType(value)
+	*x = MetricType(num)
 	return nil
 }
 
+// Deprecated: Use MetricType.Descriptor instead.
 func (MetricType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{0}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
 }
 
 type LabelPair struct {
-	Name                 *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Value                *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name  *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
 }
 
-func (m *LabelPair) Reset()         { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage()    {}
+func (x *LabelPair) Reset() {
+	*x = LabelPair{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LabelPair) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
 func (*LabelPair) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{0}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
 }
 
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelPair.Merge(m, src)
-}
-func (m *LabelPair) XXX_Size() int {
-	return xxx_messageInfo_LabelPair.Size(m)
-}
-func (m *LabelPair) XXX_DiscardUnknown() {
-	xxx_messageInfo_LabelPair.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
-
-func (m *LabelPair) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
+func (x *LabelPair) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
 	}
 	return ""
 }
 
-func (m *LabelPair) GetValue() string {
-	if m != nil && m.Value != nil {
-		return *m.Value
+func (x *LabelPair) GetValue() string {
+	if x != nil && x.Value != nil {
+		return *x.Value
 	}
 	return ""
 }
 
 type Gauge struct {
-	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
 }
 
-func (m *Gauge) Reset()         { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage()    {}
+func (x *Gauge) Reset() {
+	*x = Gauge{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Gauge) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
 func (*Gauge) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{1}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
 }
 
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Gauge.Merge(m, src)
-}
-func (m *Gauge) XXX_Size() int {
-	return xxx_messageInfo_Gauge.Size(m)
-}
-func (m *Gauge) XXX_DiscardUnknown() {
-	xxx_messageInfo_Gauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
-
-func (m *Gauge) GetValue() float64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
+func (x *Gauge) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
 	}
 	return 0
 }
 
 type Counter struct {
-	Value                *float64  `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	Exemplar             *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value            *float64               `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	Exemplar         *Exemplar              `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
 }
 
-func (m *Counter) Reset()         { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage()    {}
+func (x *Counter) Reset() {
+	*x = Counter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Counter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
 func (*Counter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{2}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
 }
 
-func (m *Counter) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Counter.Merge(m, src)
-}
-func (m *Counter) XXX_Size() int {
-	return xxx_messageInfo_Counter.Size(m)
-}
-func (m *Counter) XXX_DiscardUnknown() {
-	xxx_messageInfo_Counter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Counter proto.InternalMessageInfo
-
-func (m *Counter) GetValue() float64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
+func (x *Counter) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
 	}
 	return 0
 }
 
-func (m *Counter) GetExemplar() *Exemplar {
-	if m != nil {
-		return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+	if x != nil {
+		return x.Exemplar
+	}
+	return nil
+}
+
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
 	}
 	return nil
 }
 
 type Quantile struct {
-	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
-	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+	Value    *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
 }
 
-func (m *Quantile) Reset()         { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage()    {}
+func (x *Quantile) Reset() {
+	*x = Quantile{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Quantile) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
 func (*Quantile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{3}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
 }
 
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
-}
-func (m *Quantile) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Quantile.Merge(m, src)
-}
-func (m *Quantile) XXX_Size() int {
-	return xxx_messageInfo_Quantile.Size(m)
-}
-func (m *Quantile) XXX_DiscardUnknown() {
-	xxx_messageInfo_Quantile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
-
-func (m *Quantile) GetQuantile() float64 {
-	if m != nil && m.Quantile != nil {
-		return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+	if x != nil && x.Quantile != nil {
+		return *x.Quantile
 	}
 	return 0
 }
 
-func (m *Quantile) GetValue() float64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
+func (x *Quantile) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
 	}
 	return 0
 }
 
 type Summary struct {
-	SampleCount          *uint64     `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
-	SampleSum            *float64    `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
-	Quantile             []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
-	XXX_unrecognized     []byte      `json:"-"`
-	XXX_sizecache        int32       `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SampleCount      *uint64                `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum        *float64               `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Quantile         []*Quantile            `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
 }
 
-func (m *Summary) Reset()         { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage()    {}
+func (x *Summary) Reset() {
+	*x = Summary{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Summary) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
 func (*Summary) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{4}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
 }
 
-func (m *Summary) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
-	return xxx_messageInfo_Summary.Size(m)
-}
-func (m *Summary) XXX_DiscardUnknown() {
-	xxx_messageInfo_Summary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Summary proto.InternalMessageInfo
-
-func (m *Summary) GetSampleCount() uint64 {
-	if m != nil && m.SampleCount != nil {
-		return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+	if x != nil && x.SampleCount != nil {
+		return *x.SampleCount
 	}
 	return 0
 }
 
-func (m *Summary) GetSampleSum() float64 {
-	if m != nil && m.SampleSum != nil {
-		return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+	if x != nil && x.SampleSum != nil {
+		return *x.SampleSum
 	}
 	return 0
 }
 
-func (m *Summary) GetQuantile() []*Quantile {
-	if m != nil {
-		return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+	if x != nil {
+		return x.Quantile
+	}
+	return nil
+}
+
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
 	}
 	return nil
 }
 
 type Untyped struct {
-	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
 }
 
-func (m *Untyped) Reset()         { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage()    {}
+func (x *Untyped) Reset() {
+	*x = Untyped{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Untyped) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
 func (*Untyped) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{5}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
 }
 
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Untyped.Unmarshal(m, b)
-}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Untyped.Merge(m, src)
-}
-func (m *Untyped) XXX_Size() int {
-	return xxx_messageInfo_Untyped.Size(m)
-}
-func (m *Untyped) XXX_DiscardUnknown() {
-	xxx_messageInfo_Untyped.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
-
-func (m *Untyped) GetValue() float64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
+func (x *Untyped) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
 	}
 	return 0
 }
 
 type Histogram struct {
-	SampleCount          *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
-	SampleSum            *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
-	Bucket               []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SampleCount      *uint64  `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
+	SampleSum        *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	// Buckets for the conventional histogram.
+	Bucket           []*Bucket              `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+	// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+	// In the future, more bucket schemas may be added using numbers < -4 or > 8.
+	Schema         *int32   `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+	ZeroThreshold  *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`      // Breadth of the zero bucket.
+	ZeroCount      *uint64  `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`                   // Count in zero bucket.
+	ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides zero_count if > 0.
+	// Negative buckets for the native histogram.
+	NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+	// Use either "negative_delta" or "negative_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`  // Absolute count of each bucket.
+	// Positive buckets for the native histogram.
+	// Use a no-op span (offset 0, length 0) for a native histogram without any
+	// observations yet and with a zero_threshold of 0. Otherwise, it would be
+	// indistinguishable from a classic histogram.
+	PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+	// Use either "positive_delta" or "positive_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	PositiveDelta []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`  // Absolute count of each bucket.
+	// Only used for native histograms. These exemplars MUST have a timestamp.
+	Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
 }
 
-func (m *Histogram) Reset()         { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage()    {}
+func (x *Histogram) Reset() {
+	*x = Histogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Histogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
 func (*Histogram) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{6}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
 }
 
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Histogram.Unmarshal(m, b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
-	return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
-	xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-func (m *Histogram) GetSampleCount() uint64 {
-	if m != nil && m.SampleCount != nil {
-		return *m.SampleCount
+func (x *Histogram) GetSampleCount() uint64 {
+	if x != nil && x.SampleCount != nil {
+		return *x.SampleCount
 	}
 	return 0
 }
 
-func (m *Histogram) GetSampleSum() float64 {
-	if m != nil && m.SampleSum != nil {
-		return *m.SampleSum
+func (x *Histogram) GetSampleCountFloat() float64 {
+	if x != nil && x.SampleCountFloat != nil {
+		return *x.SampleCountFloat
 	}
 	return 0
 }
 
-func (m *Histogram) GetBucket() []*Bucket {
-	if m != nil {
-		return m.Bucket
+func (x *Histogram) GetSampleSum() float64 {
+	if x != nil && x.SampleSum != nil {
+		return *x.SampleSum
+	}
+	return 0
+}
+
+func (x *Histogram) GetBucket() []*Bucket {
+	if x != nil {
+		return x.Bucket
 	}
 	return nil
 }
 
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+func (x *Histogram) GetSchema() int32 {
+	if x != nil && x.Schema != nil {
+		return *x.Schema
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroThreshold() float64 {
+	if x != nil && x.ZeroThreshold != nil {
+		return *x.ZeroThreshold
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCount() uint64 {
+	if x != nil && x.ZeroCount != nil {
+		return *x.ZeroCount
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCountFloat() float64 {
+	if x != nil && x.ZeroCountFloat != nil {
+		return *x.ZeroCountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+	if x != nil {
+		return x.NegativeSpan
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+	if x != nil {
+		return x.NegativeDelta
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeCount() []float64 {
+	if x != nil {
+		return x.NegativeCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+	if x != nil {
+		return x.PositiveSpan
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveDelta() []int64 {
+	if x != nil {
+		return x.PositiveDelta
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveCount() []float64 {
+	if x != nil {
+		return x.PositiveCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
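Editor's note: the getters above expose the native-histogram fields that are new in this generated file. As a non-normative sketch (the helper below is illustrative, not generated code), this is how the span/delta encoding described in the field comments can be decoded into absolute bucket counts:

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

// printPositiveBuckets walks the positive spans and deltas of a native
// histogram: each span's offset is the gap to the previous span (or the
// starting index for the first span), and each delta is relative to the
// previous bucket's count.
func printPositiveBuckets(h *dto.Histogram) {
	idx := int32(0)
	var count int64
	d := 0 // position in the delta slice
	for _, span := range h.GetPositiveSpan() {
		idx += span.GetOffset()
		for i := uint32(0); i < span.GetLength(); i++ {
			count += h.GetPositiveDelta()[d]
			fmt.Printf("bucket %d: count %d\n", idx, count)
			idx++
			d++
		}
	}
}

func main() {
	off, length := int32(2), uint32(3)
	h := &dto.Histogram{
		PositiveSpan:  []*dto.BucketSpan{{Offset: &off, Length: &length}},
		PositiveDelta: []int64{2, 1, -1}, // absolute counts 2, 3, 2
	}
	printPositiveBuckets(h)
}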
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
 type Bucket struct {
-	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
-	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`                   // Cumulative in increasing order.
+	CumulativeCountFloat *float64  `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`                                 // Inclusive.
 	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *Bucket) Reset()         { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage()    {}
+func (x *Bucket) Reset() {
+	*x = Bucket{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Bucket) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
 func (*Bucket) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{7}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
 }
 
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
-}
-func (m *Bucket) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
-	return xxx_messageInfo_Bucket.Size(m)
-}
-func (m *Bucket) XXX_DiscardUnknown() {
-	xxx_messageInfo_Bucket.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
-
-func (m *Bucket) GetCumulativeCount() uint64 {
-	if m != nil && m.CumulativeCount != nil {
-		return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCount() uint64 {
+	if x != nil && x.CumulativeCount != nil {
+		return *x.CumulativeCount
 	}
 	return 0
 }
 
-func (m *Bucket) GetUpperBound() float64 {
-	if m != nil && m.UpperBound != nil {
-		return *m.UpperBound
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+	if x != nil && x.CumulativeCountFloat != nil {
+		return *x.CumulativeCountFloat
 	}
 	return 0
 }
 
-func (m *Bucket) GetExemplar() *Exemplar {
-	if m != nil {
-		return m.Exemplar
+func (x *Bucket) GetUpperBound() float64 {
+	if x != nil && x.UpperBound != nil {
+		return *x.UpperBound
+	}
+	return 0
+}
+
+func (x *Bucket) GetExemplar() *Exemplar {
+	if x != nil {
+		return x.Exemplar
 	}
 	return nil
 }
 
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Offset *int32  `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+	Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`   // Length of consecutive buckets.
+}
+
+func (x *BucketSpan) Reset() {
+	*x = BucketSpan{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BucketSpan) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+	if x != nil && x.Offset != nil {
+		return *x.Offset
+	}
+	return 0
+}
+
+func (x *BucketSpan) GetLength() uint32 {
+	if x != nil && x.Length != nil {
+		return *x.Length
+	}
+	return 0
+}
+
 type Exemplar struct {
-	Label                []*LabelPair         `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
-	Value                *float64             `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
-	Timestamp            *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
-	XXX_unrecognized     []byte               `json:"-"`
-	XXX_sizecache        int32                `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Label     []*LabelPair           `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Value     *float64               `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
 }
 
-func (m *Exemplar) Reset()         { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage()    {}
+func (x *Exemplar) Reset() {
+	*x = Exemplar{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Exemplar) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
 func (*Exemplar) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{8}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
 }
 
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Exemplar.Merge(m, src)
-}
-func (m *Exemplar) XXX_Size() int {
-	return xxx_messageInfo_Exemplar.Size(m)
-}
-func (m *Exemplar) XXX_DiscardUnknown() {
-	xxx_messageInfo_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
-
-func (m *Exemplar) GetLabel() []*LabelPair {
-	if m != nil {
-		return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+	if x != nil {
+		return x.Label
 	}
 	return nil
 }
 
-func (m *Exemplar) GetValue() float64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
+func (x *Exemplar) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
 	}
 	return 0
 }
 
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
-	if m != nil {
-		return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
 	}
 	return nil
 }
 
 type Metric struct {
-	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
-	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
-	Counter              *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
-	Summary              *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
-	Untyped              *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
-	Histogram            *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
-	TimestampMs          *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
-	XXX_unrecognized     []byte       `json:"-"`
-	XXX_sizecache        int32        `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Label       []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Gauge       *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+	Counter     *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+	Summary     *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+	Untyped     *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+	Histogram   *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+	TimestampMs *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
 }
 
-func (m *Metric) Reset()         { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage()    {}
+func (x *Metric) Reset() {
+	*x = Metric{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metric) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
 func (*Metric) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{9}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
 }
 
-func (m *Metric) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Metric.Unmarshal(m, b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
-	return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
-	xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-func (m *Metric) GetLabel() []*LabelPair {
-	if m != nil {
-		return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+	if x != nil {
+		return x.Label
 	}
 	return nil
 }
 
-func (m *Metric) GetGauge() *Gauge {
-	if m != nil {
-		return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+	if x != nil {
+		return x.Gauge
 	}
 	return nil
 }
 
-func (m *Metric) GetCounter() *Counter {
-	if m != nil {
-		return m.Counter
+func (x *Metric) GetCounter() *Counter {
+	if x != nil {
+		return x.Counter
 	}
 	return nil
 }
 
-func (m *Metric) GetSummary() *Summary {
-	if m != nil {
-		return m.Summary
+func (x *Metric) GetSummary() *Summary {
+	if x != nil {
+		return x.Summary
 	}
 	return nil
 }
 
-func (m *Metric) GetUntyped() *Untyped {
-	if m != nil {
-		return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+	if x != nil {
+		return x.Untyped
 	}
 	return nil
 }
 
-func (m *Metric) GetHistogram() *Histogram {
-	if m != nil {
-		return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+	if x != nil {
+		return x.Histogram
 	}
 	return nil
 }
 
-func (m *Metric) GetTimestampMs() int64 {
-	if m != nil && m.TimestampMs != nil {
-		return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+	if x != nil && x.TimestampMs != nil {
+		return *x.TimestampMs
 	}
 	return 0
 }
 
 type MetricFamily struct {
-	Name                 *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Help                 *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
-	Type                 *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
-	Metric               []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
-	XXX_unrecognized     []byte      `json:"-"`
-	XXX_sizecache        int32       `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name   *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Help   *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+	Type   *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+	Metric []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+	Unit   *string     `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
 }
 
-func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage()    {}
+func (x *MetricFamily) Reset() {
+	*x = MetricFamily{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetricFamily) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
 func (*MetricFamily) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{10}
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
 }
 
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
-}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
-	return xxx_messageInfo_MetricFamily.Size(m)
-}
-func (m *MetricFamily) XXX_DiscardUnknown() {
-	xxx_messageInfo_MetricFamily.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
-
-func (m *MetricFamily) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
+func (x *MetricFamily) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
 	}
 	return ""
 }
 
-func (m *MetricFamily) GetHelp() string {
-	if m != nil && m.Help != nil {
-		return *m.Help
+func (x *MetricFamily) GetHelp() string {
+	if x != nil && x.Help != nil {
+		return *x.Help
 	}
 	return ""
 }
 
-func (m *MetricFamily) GetType() MetricType {
-	if m != nil && m.Type != nil {
-		return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+	if x != nil && x.Type != nil {
+		return *x.Type
 	}
 	return MetricType_COUNTER
 }
 
-func (m *MetricFamily) GetMetric() []*Metric {
-	if m != nil {
-		return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+	if x != nil {
+		return x.Metric
 	}
 	return nil
 }
 
-func init() {
-	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
-	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
-	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
-	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
-	proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
-	proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
-	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
-	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
-	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
-	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
-	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
-	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
+func (x *MetricFamily) GetUnit() string {
+	if x != nil && x.Unit != nil {
+		return *x.Unit
+	}
+	return ""
 }
 
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
 
-var fileDescriptor_6039342a2ba47b72 = []byte{
-	// 665 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
-	0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
-	0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
-	0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
-	0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
-	0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
-	0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
-	0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
-	0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
-	0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
-	0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
-	0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
-	0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
-	0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
-	0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
-	0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
-	0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
-	0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
-	0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
-	0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
-	0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
-	0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
-	0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
-	0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
-	0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
-	0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
-	0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
-	0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
-	0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
-	0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
-	0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
-	0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
-	0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
-	0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
-	0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
-	0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
-	0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
-	0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
-	0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
-	0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
-	0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
-	0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+	0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+	0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+	0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+	0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+	0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+	0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+	0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+	0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+	0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+	0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+	0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+	0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+	0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+	0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+	0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+	0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+	0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+	0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+	0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+	0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+	0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+	0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+	0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+	0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+	0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+	0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+	0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+	0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+	0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+	0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+	0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+	0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+	0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+	0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+	0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+	0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+	0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+	0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+	0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+	0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+	0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+	0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+	0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+	0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+	0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+	0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+	0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+	0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+	0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+	0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+	0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+	0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+	0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+	0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+	0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+	0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+	0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+	0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+	0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+	0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+	0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+	0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+	0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+	0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+	0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+	0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+	0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+	0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+	0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+	0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+	0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+	0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+	0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+	0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+	0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+	0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+	0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+	0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+	0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+	0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+	0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+	0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+	0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+	file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+	file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+	file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+		file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+	})
+	return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+	(MetricType)(0),               // 0: io.prometheus.client.MetricType
+	(*LabelPair)(nil),             // 1: io.prometheus.client.LabelPair
+	(*Gauge)(nil),                 // 2: io.prometheus.client.Gauge
+	(*Counter)(nil),               // 3: io.prometheus.client.Counter
+	(*Quantile)(nil),              // 4: io.prometheus.client.Quantile
+	(*Summary)(nil),               // 5: io.prometheus.client.Summary
+	(*Untyped)(nil),               // 6: io.prometheus.client.Untyped
+	(*Histogram)(nil),             // 7: io.prometheus.client.Histogram
+	(*Bucket)(nil),                // 8: io.prometheus.client.Bucket
+	(*BucketSpan)(nil),            // 9: io.prometheus.client.BucketSpan
+	(*Exemplar)(nil),              // 10: io.prometheus.client.Exemplar
+	(*Metric)(nil),                // 11: io.prometheus.client.Metric
+	(*MetricFamily)(nil),          // 12: io.prometheus.client.MetricFamily
+	(*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+	10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+	13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+	4,  // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+	13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+	8,  // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+	13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+	9,  // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+	9,  // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+	10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+	10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+	1,  // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+	13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+	1,  // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+	2,  // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+	3,  // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+	5,  // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+	6,  // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+	7,  // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+	0,  // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+	11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+	20, // [20:20] is the sub-list for method output_type
+	20, // [20:20] is the sub-list for method input_type
+	20, // [20:20] is the sub-list for extension type_name
+	20, // [20:20] is the sub-list for extension extendee
+	0,  // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+	if File_io_prometheus_client_metrics_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LabelPair); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Gauge); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Counter); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Quantile); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Summary); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Untyped); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Histogram); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Bucket); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BucketSpan); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Exemplar); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metric); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetricFamily); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   12,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_io_prometheus_client_metrics_proto_goTypes,
+		DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+		EnumInfos:         file_io_prometheus_client_metrics_proto_enumTypes,
+		MessageInfos:      file_io_prometheus_client_metrics_proto_msgTypes,
+	}.Build()
+	File_io_prometheus_client_metrics_proto = out.File
+	file_io_prometheus_client_metrics_proto_rawDesc = nil
+	file_io_prometheus_client_metrics_proto_goTypes = nil
+	file_io_prometheus_client_metrics_proto_depIdxs = nil
 }
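
A minimal, illustrative sketch of how the regenerated client_model types can be used; the getter-based API replaces the removed XXX_* helpers and MetricFamily gains an optional Unit field. The metric name and values below are made up.

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Construct a MetricFamily with pointer helpers from the new protobuf API.
	mf := &dto.MetricFamily{
		Name: proto.String("http_request_duration"),
		Help: proto.String("Duration of HTTP requests."),
		Type: dto.MetricType_HISTOGRAM.Enum(),
		Unit: proto.String("seconds"), // optional field added in this regeneration
	}
	// Read values back through the generated getters.
	fmt.Println(mf.GetName(), mf.GetUnit(), mf.GetType())
}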
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f84..7b76237 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
 package expfmt
 
 import (
+	"bufio"
 	"fmt"
 	"io"
 	"math"
@@ -21,8 +22,8 @@
 	"net/http"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
 
-	"github.com/matttproud/golang_protobuf_extensions/pbutil"
 	"github.com/prometheus/common/model"
 )
 
@@ -69,28 +70,45 @@
 	return FmtUnknown
 }
 
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
 func NewDecoder(r io.Reader, format Format) Decoder {
-	switch format {
-	case FmtProtoDelim:
-		return &protoDecoder{r: r}
+	scheme := model.LegacyValidation
+	if format.ToEscapingScheme() == model.NoEscaping {
+		scheme = model.UTF8Validation
 	}
-	return &textDecoder{r: r}
+	switch format.FormatType() {
+	case TypeProtoDelim:
+		return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+	case TypeProtoText, TypeProtoCompact:
+		return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
+	}
+	return &textDecoder{r: r, s: scheme}
 }
 
 // protoDecoder implements the Decoder interface for protocol buffers.
 type protoDecoder struct {
-	r io.Reader
+	r protodelim.Reader
+	s model.ValidationScheme
 }
 
 // Decode implements the Decoder interface.
 func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
-	_, err := pbutil.ReadDelimited(d.r, v)
-	if err != nil {
+	opts := protodelim.UnmarshalOptions{
+		MaxSize: -1,
+	}
+	if err := opts.UnmarshalFrom(d.r, v); err != nil {
 		return err
 	}
-	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+	if !d.s.IsValidMetricName(v.GetName()) {
 		return fmt.Errorf("invalid metric name %q", v.GetName())
 	}
 	for _, m := range v.GetMetric() {
@@ -104,7 +122,7 @@
 			if !model.LabelValue(l.GetValue()).IsValid() {
 				return fmt.Errorf("invalid label value %q", l.GetValue())
 			}
-			if !model.LabelName(l.GetName()).IsValid() {
+			if !d.s.IsValidLabelName(l.GetName()) {
 				return fmt.Errorf("invalid label name %q", l.GetName())
 			}
 		}
@@ -112,35 +130,44 @@
 	return nil
 }
 
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+	err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+	return d.err
+}
+
 // textDecoder implements the Decoder interface for the text protocol.
 type textDecoder struct {
 	r    io.Reader
-	p    TextParser
-	fams []*dto.MetricFamily
+	fams map[string]*dto.MetricFamily
+	s    model.ValidationScheme
+	err  error
 }
 
 // Decode implements the Decoder interface.
 func (d *textDecoder) Decode(v *dto.MetricFamily) error {
-	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
-	if len(d.fams) == 0 {
-		// No cached metric families, read everything and parse metrics.
-		fams, err := d.p.TextToMetricFamilies(d.r)
-		if err != nil {
-			return err
-		}
-		if len(fams) == 0 {
-			return io.EOF
-		}
-		d.fams = make([]*dto.MetricFamily, 0, len(fams))
-		for _, f := range fams {
-			d.fams = append(d.fams, f)
+	if d.err == nil {
+		// Read all metrics in one shot.
+		p := NewTextParser(d.s)
+		d.fams, d.err = p.TextToMetricFamilies(d.r)
+		// If we don't get an error, store io.EOF for the end.
+		if d.err == nil {
+			d.err = io.EOF
 		}
 	}
-
-	*v = *d.fams[0]
-	d.fams = d.fams[1:]
-
-	return nil
+	// Pick off one MetricFamily per Decode until there's nothing left.
+	for key, fam := range d.fams {
+		v.Name = fam.Name
+		v.Help = fam.Help
+		v.Type = fam.Type
+		v.Metric = fam.Metric
+		delete(d.fams, key)
+		return nil
+	}
+	return d.err
 }
 
 // SampleDecoder wraps a Decoder to extract samples from the metric families
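
A minimal sketch of how the updated decoder might be consumed; the exposition text and metric name are made up and error handling is kept simple.

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader(`# TYPE requests_total counter
requests_total{path="/"} 42
`)
	// Text-format decoding; the decoder reads one MetricFamily per Decode call
	// and reports io.EOF once everything has been handed out.
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
	}
}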
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index bd4e347..73c24df 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,11 +18,12 @@
 	"io"
 	"net/http"
 
-	"github.com/golang/protobuf/proto"
-	"github.com/matttproud/golang_protobuf_extensions/pbutil"
-	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
-
+	"github.com/munnerz/goautoneg"
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
+	"google.golang.org/protobuf/encoding/prototext"
+
+	"github.com/prometheus/common/model"
 )
 
 // Encoder types encode metric families into an underlying wire protocol.
@@ -58,25 +59,34 @@
 // appropriate accepted type is found, FmtText is returned (which is the
 // Prometheus text format). This function will never negotiate FmtOpenMetrics,
 // as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateOpenMetrics.
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
 func Negotiate(h http.Header) Format {
+	escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+			switch Format(escapeParam) {
+			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+				escapingScheme = Format("; escaping=" + escapeParam)
+			default:
+				// If the escaping parameter is unknown, ignore it.
+			}
+		}
 		ver := ac.Params["version"]
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return FmtProtoDelim
+				return FmtProtoDelim + escapingScheme
 			case "text":
-				return FmtProtoText
+				return FmtProtoText + escapingScheme
 			case "compact-text":
-				return FmtProtoCompact
+				return FmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return FmtText
+			return FmtText + escapingScheme
 		}
 	}
-	return FmtText
+	return FmtText + escapingScheme
 }
 
 // NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -84,26 +94,40 @@
 // temporary and will disappear once FmtOpenMetrics is fully supported and as
 // such may be negotiated by the normal Negotiate function.
 func NegotiateIncludingOpenMetrics(h http.Header) Format {
+	escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+			switch Format(escapeParam) {
+			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+				escapingScheme = Format("; escaping=" + escapeParam)
+			default:
+				// If the escaping parameter is unknown, ignore it.
+			}
+		}
 		ver := ac.Params["version"]
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return FmtProtoDelim
+				return FmtProtoDelim + escapingScheme
 			case "text":
-				return FmtProtoText
+				return FmtProtoText + escapingScheme
 			case "compact-text":
-				return FmtProtoCompact
+				return FmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return FmtText
+			return FmtText + escapingScheme
 		}
-		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
-			return FmtOpenMetrics
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+			switch ver {
+			case OpenMetricsVersion_1_0_0:
+				return FmtOpenMetrics_1_0_0 + escapingScheme
+			default:
+				return FmtOpenMetrics_0_0_1 + escapingScheme
+			}
 		}
 	}
-	return FmtText
+	return FmtText + escapingScheme
 }
 
 // NewEncoder returns a new encoder based on content type negotiation. All
@@ -112,44 +136,54 @@
 // for FmtOpenMetrics, but a future (breaking) release will add the Close method
 // to the Encoder interface directly. The current version of the Encoder
 // interface is kept for backwards compatibility.
-func NewEncoder(w io.Writer, format Format) Encoder {
-	switch format {
-	case FmtProtoDelim:
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+	escapingScheme := format.ToEscapingScheme()
+
+	switch format.FormatType() {
+	case TypeProtoDelim:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := pbutil.WriteDelimited(w, v)
+				_, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme))
 				return err
 			},
 			close: func() error { return nil },
 		}
-	case FmtProtoCompact:
+	case TypeProtoCompact:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := fmt.Fprintln(w, v.String())
+				_, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
 				return err
 			},
 			close: func() error { return nil },
 		}
-	case FmtProtoText:
+	case TypeProtoText:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+				_, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
 				return err
 			},
 			close: func() error { return nil },
 		}
-	case FmtText:
+	case TypeTextPlain:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := MetricFamilyToText(w, v)
+				_, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
 				return err
 			},
 			close: func() error { return nil },
 		}
-	case FmtOpenMetrics:
+	case TypeOpenMetrics:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := MetricFamilyToOpenMetrics(w, v)
+				_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
 				return err
 			},
 			close: func() error {
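
A hypothetical handler sketch showing how the negotiated Format feeds into NewEncoder; the endpoint, port, and metric are made up and error handling is omitted.

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Negotiate picks text or protobuf plus an escaping term from the Accept header.
	format := expfmt.Negotiate(r.Header)
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format) // EncoderOptions could be appended here
	mf := &dto.MetricFamily{
		Name: proto.String("up"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}
	_ = enc.Encode(mf)
}

func main() {
	http.HandleFunc("/metrics", handler)
	_ = http.ListenAndServe(":8080", nil)
}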
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 0f176fa..c34c7de 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -14,28 +14,198 @@
 // Package expfmt contains tools for reading and writing Prometheus metrics.
 package expfmt
 
+import (
+	"errors"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
 // Format specifies the HTTP content type of the different wire protocols.
 type Format string
 
-// Constants to assemble the Content-Type values for the different wire protocols.
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
 const (
-	TextVersion        = "0.0.4"
-	ProtoType          = `application/vnd.google.protobuf`
-	ProtoProtocol      = `io.prometheus.client.MetricFamily`
-	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";"
-	OpenMetricsType    = `application/openmetrics-text`
-	OpenMetricsVersion = "0.0.1"
+	TextVersion   = "0.0.4"
+	ProtoType     = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	ProtoFmt        = ProtoType + "; proto=" + ProtoProtocol + ";"
+	OpenMetricsType = `application/openmetrics-text`
+	//nolint:revive // Allow for underscores.
+	OpenMetricsVersion_0_0_1 = "0.0.1"
+	//nolint:revive // Allow for underscores.
+	OpenMetricsVersion_1_0_0 = "1.0.0"
 
-	// The Content-Type values for the different wire protocols.
-	FmtUnknown      Format = `<unknown>`
-	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
-	FmtProtoText    Format = ProtoFmt + ` encoding=text`
+	// The Content-Type values for the different wire protocols. Do not do direct
+	// comparisons to these constants, instead use the comparison functions.
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+	FmtUnknown Format = `<unknown>`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+	FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+	FmtProtoText Format = ProtoFmt + ` encoding=text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	//nolint:revive // Allow for underscores.
+	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	//nolint:revive // Allow for underscores.
+	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
 )
 
 const (
 	hdrContentType = "Content-Type"
 	hdrAccept      = "Accept"
 )
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, basic string
+// comparisons are no longer feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+	TypeUnknown FormatType = iota
+	TypeProtoCompact
+	TypeProtoDelim
+	TypeProtoText
+	TypeTextPlain
+	TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. It is mostly used
+// for tests; most Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
+func NewFormat(t FormatType) Format {
+	switch t {
+	case TypeProtoCompact:
+		return FmtProtoCompact
+	case TypeProtoDelim:
+		return FmtProtoDelim
+	case TypeProtoText:
+		return FmtProtoText
+	case TypeTextPlain:
+		return FmtText
+	case TypeOpenMetrics:
+		return FmtOpenMetrics_1_0_0
+	default:
+		return FmtUnknown
+	}
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+	if version == OpenMetricsVersion_0_0_1 {
+		return FmtOpenMetrics_0_0_1, nil
+	}
+	if version == OpenMetricsVersion_1_0_0 {
+		return FmtOpenMetrics_1_0_0, nil
+	}
+	return FmtUnknown, errors.New("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists, it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+	var terms []string
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			trimmed := strings.TrimSpace(p)
+			if len(trimmed) > 0 {
+				terms = append(terms, trimmed)
+			}
+			continue
+		}
+		key := strings.TrimSpace(toks[0])
+		if key != model.EscapingKey {
+			terms = append(terms, strings.TrimSpace(p))
+		}
+	}
+	terms = append(terms, model.EscapingKey+"="+s.String())
+	return Format(strings.Join(terms, "; "))
+}
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+	toks := strings.Split(string(f), ";")
+	params := make(map[string]string)
+	for i, t := range toks {
+		if i == 0 {
+			continue
+		}
+		args := strings.Split(t, "=")
+		if len(args) != 2 {
+			continue
+		}
+		params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+	}
+
+	switch strings.TrimSpace(toks[0]) {
+	case ProtoType:
+		if params["proto"] != ProtoProtocol {
+			return TypeUnknown
+		}
+		switch params["encoding"] {
+		case "delimited":
+			return TypeProtoDelim
+		case "text":
+			return TypeProtoText
+		case "compact-text":
+			return TypeProtoCompact
+		default:
+			return TypeUnknown
+		}
+	case OpenMetricsType:
+		if params["charset"] != "utf-8" {
+			return TypeUnknown
+		}
+		return TypeOpenMetrics
+	case "text/plain":
+		v, ok := params["version"]
+		if !ok {
+			return TypeTextPlain
+		}
+		if v == TextVersion {
+			return TypeTextPlain
+		}
+		return TypeUnknown
+	default:
+		return TypeUnknown
+	}
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
+// "escaping" term exists, that will be used. Otherwise, the global default will
+// be returned.
+func (f Format) ToEscapingScheme() model.EscapingScheme {
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			continue
+		}
+		key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+		if key == model.EscapingKey {
+			scheme, err := model.ToEscapingScheme(value)
+			if err != nil {
+				return model.NameEscapingScheme
+			}
+			return scheme
+		}
+	}
+	return model.NameEscapingScheme
+}
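
A small sketch exercising the new Format helpers (NewFormat, NewOpenMetricsFormat, WithEscapingScheme, FormatType, ToEscapingScheme); the printed values are indicative only.

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics) // resolves to the latest OpenMetrics version
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics)

	om, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_0_0_1)
	fmt.Println(om, err)

	// Replace (or add) the escaping term and read it back.
	g := f.WithEscapingScheme(model.NoEscaping)
	fmt.Println(g) // ends in "; escaping=allow-utf-8"
	fmt.Println(g.ToEscapingScheme() == model.NoEscaping)
}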
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index dc2eede..0290f6a 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -12,22 +12,26 @@
 // limitations under the License.
 
 // Build only when actually fuzzing
+//go:build gofuzz
 // +build gofuzz
 
 package expfmt
 
-import "bytes"
+import (
+	"bytes"
+
+	"github.com/prometheus/common/model"
+)
 
 // Fuzz text metric parser with github.com/dvyukov/go-fuzz:
 //
-//     go-fuzz-build github.com/prometheus/common/expfmt
-//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//	go-fuzz-build github.com/prometheus/common/expfmt
+//	go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
 //
 // Further input samples should go in the folder fuzz/corpus.
 func Fuzz(in []byte) int {
-	parser := TextParser{}
+	parser := NewTextParser(model.UTF8Validation)
 	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
 	if err != nil {
 		return 0
 	}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 8a9313a..8dbf6d0 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,12 +22,46 @@
 	"strconv"
 	"strings"
 
-	"github.com/golang/protobuf/ptypes"
-	"github.com/prometheus/common/model"
-
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"github.com/prometheus/common/model"
 )
 
+type encoderOption struct {
+	withCreatedLines bool
+	withUnit         bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature flag
+// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+	return func(t *encoderOption) {
+		t.withCreatedLines = true
+	}
+}
+
+// WithUnit is an EncoderOption that configures the OpenMetrics encoder to write
+// the unit to the output and to append it to the metric name as a suffix, if it
+// is not there already. Without opting in this way, the unit is neither added to
+// the metric name nor written to the output, even if it is declared in the
+// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+	return func(t *encoderOption) {
+		t.withUnit = true
+	}
+}
+
 // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
 // OpenMetrics text format and writes the resulting lines to 'out'. It returns
 // the number of bytes written and any error encountered. The output will have
@@ -36,6 +70,18 @@
 // sanity checks. If the input contains duplicate metrics or invalid metric or
 // label names, the conversion will result in invalid text format output.
 //
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be returned in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
 // This function fulfills the type 'expfmt.encoder'.
 //
 // Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@@ -47,21 +93,35 @@
 // missing features and peculiarities to avoid complications when switching from
 // Prometheus to OpenMetrics or vice versa:
 //
-// - Counters are expected to have the `_total` suffix in their metric name. In
-//   the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-//   line. A counter with a missing `_total` suffix is not an error. However,
-//   its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-//   output.
+//   - Counters are expected to have the `_total` suffix in their metric name. In
+//     the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+//     lines. A counter with a missing `_total` suffix is not an error. However,
+//     its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+//     output.
 //
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-//   line, info type, stateset type, gaugehistogram type.
+//   - According to the OM spec, the `# UNIT` line is optional, but if it is
+//     present, the unit has to appear in the metric name as its suffix
+//     (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
+//     However, to accommodate scenarios where such a change to the metric name
+//     is not desirable, callers are given the choice: they can explicitly opt in
+//     via the WithUnit option (described above) if they want the unit to be
+//     included in the output and appended to the metric name as a suffix, or
+//     they can leave it out, in which case neither happens.
 //
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-//   exemplars that are larger than allowed by the OpenMetrics specification).
+//   - No support for the following (optional) features: info type,
+//     stateset type, gaugehistogram type.
 //
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-//   with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+//   - The size of exemplar labels is not checked (i.e. it's possible to create
+//     exemplars that are larger than allowed by the OpenMetrics specification).
+//
+//   - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+//     with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+	toOM := encoderOption{}
+	for _, option := range options {
+		option(&toOM)
+	}
+
 	name := in.GetName()
 	if name == "" {
 		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -84,12 +144,15 @@
 	}
 
 	var (
-		n          int
-		metricType = in.GetType()
-		shortName  = name
+		n             int
+		metricType    = in.GetType()
+		compliantName = name
 	)
-	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
-		shortName = name[:len(name)-6]
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+		compliantName = name[:len(name)-6]
+	}
+	if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+		compliantName = compliantName + "_" + *in.Unit
 	}
 
 	// Comments, first HELP, then TYPE.
@@ -99,7 +162,7 @@
 		if err != nil {
 			return
 		}
-		n, err = w.WriteString(shortName)
+		n, err = writeName(w, compliantName)
 		written += n
 		if err != nil {
 			return
@@ -125,7 +188,7 @@
 	if err != nil {
 		return
 	}
-	n, err = w.WriteString(shortName)
+	n, err = writeName(w, compliantName)
 	written += n
 	if err != nil {
 		return
@@ -152,55 +215,89 @@
 	if err != nil {
 		return
 	}
+	if toOM.withUnit && in.Unit != nil {
+		n, err = w.WriteString("# UNIT ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, compliantName)
+		written += n
+		if err != nil {
+			return
+		}
+
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Unit, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+
+	var createdTsBytesWritten int
 
 	// Finally the samples, one line for each.
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+		compliantName += "_total"
+	}
 	for _, metric := range in.Metric {
 		switch metricType {
 		case dto.MetricType_COUNTER:
 			if metric.Counter == nil {
 				return written, fmt.Errorf(
-					"expected counter in metric %s %s", name, metric,
+					"expected counter in metric %s %s", compliantName, metric,
 				)
 			}
-			// Note that we have ensured above that either the name
-			// ends on `_total` or that the rendered type is
-			// `unknown`. Therefore, no `_total` must be added here.
 			n, err = writeOpenMetricsSample(
-				w, name, "", metric, "", 0,
+				w, compliantName, "", metric, "", 0,
 				metric.Counter.GetValue(), 0, false,
 				metric.Counter.Exemplar,
 			)
+			if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
 		case dto.MetricType_GAUGE:
 			if metric.Gauge == nil {
 				return written, fmt.Errorf(
-					"expected gauge in metric %s %s", name, metric,
+					"expected gauge in metric %s %s", compliantName, metric,
 				)
 			}
 			n, err = writeOpenMetricsSample(
-				w, name, "", metric, "", 0,
+				w, compliantName, "", metric, "", 0,
 				metric.Gauge.GetValue(), 0, false,
 				nil,
 			)
 		case dto.MetricType_UNTYPED:
 			if metric.Untyped == nil {
 				return written, fmt.Errorf(
-					"expected untyped in metric %s %s", name, metric,
+					"expected untyped in metric %s %s", compliantName, metric,
 				)
 			}
 			n, err = writeOpenMetricsSample(
-				w, name, "", metric, "", 0,
+				w, compliantName, "", metric, "", 0,
 				metric.Untyped.GetValue(), 0, false,
 				nil,
 			)
 		case dto.MetricType_SUMMARY:
 			if metric.Summary == nil {
 				return written, fmt.Errorf(
-					"expected summary in metric %s %s", name, metric,
+					"expected summary in metric %s %s", compliantName, metric,
 				)
 			}
 			for _, q := range metric.Summary.Quantile {
 				n, err = writeOpenMetricsSample(
-					w, name, "", metric,
+					w, compliantName, "", metric,
 					model.QuantileLabel, q.GetQuantile(),
 					q.GetValue(), 0, false,
 					nil,
@@ -211,7 +308,7 @@
 				}
 			}
 			n, err = writeOpenMetricsSample(
-				w, name, "_sum", metric, "", 0,
+				w, compliantName, "_sum", metric, "", 0,
 				metric.Summary.GetSampleSum(), 0, false,
 				nil,
 			)
@@ -220,20 +317,24 @@
 				return
 			}
 			n, err = writeOpenMetricsSample(
-				w, name, "_count", metric, "", 0,
+				w, compliantName, "_count", metric, "", 0,
 				0, metric.Summary.GetSampleCount(), true,
 				nil,
 			)
+			if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
 		case dto.MetricType_HISTOGRAM:
 			if metric.Histogram == nil {
 				return written, fmt.Errorf(
-					"expected histogram in metric %s %s", name, metric,
+					"expected histogram in metric %s %s", compliantName, metric,
 				)
 			}
 			infSeen := false
 			for _, b := range metric.Histogram.Bucket {
 				n, err = writeOpenMetricsSample(
-					w, name, "_bucket", metric,
+					w, compliantName, "_bucket", metric,
 					model.BucketLabel, b.GetUpperBound(),
 					0, b.GetCumulativeCount(), true,
 					b.Exemplar,
@@ -248,7 +349,7 @@
 			}
 			if !infSeen {
 				n, err = writeOpenMetricsSample(
-					w, name, "_bucket", metric,
+					w, compliantName, "_bucket", metric,
 					model.BucketLabel, math.Inf(+1),
 					0, metric.Histogram.GetSampleCount(), true,
 					nil,
@@ -259,7 +360,7 @@
 				}
 			}
 			n, err = writeOpenMetricsSample(
-				w, name, "_sum", metric, "", 0,
+				w, compliantName, "_sum", metric, "", 0,
 				metric.Histogram.GetSampleSum(), 0, false,
 				nil,
 			)
@@ -268,13 +369,17 @@
 				return
 			}
 			n, err = writeOpenMetricsSample(
-				w, name, "_count", metric, "", 0,
+				w, compliantName, "_count", metric, "", 0,
 				0, metric.Histogram.GetSampleCount(), true,
 				nil,
 			)
+			if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
 		default:
 			return written, fmt.Errorf(
-				"unexpected type in metric %s %s", name, metric,
+				"unexpected type in metric %s %s", compliantName, metric,
 			)
 		}
 		written += n
@@ -304,21 +409,9 @@
 	floatValue float64, intValue uint64, useIntValue bool,
 	exemplar *dto.Exemplar,
 ) (int, error) {
-	var written int
-	n, err := w.WriteString(name)
-	written += n
-	if err != nil {
-		return written, err
-	}
-	if suffix != "" {
-		n, err = w.WriteString(suffix)
-		written += n
-		if err != nil {
-			return written, err
-		}
-	}
-	n, err = writeOpenMetricsLabelPairs(
-		w, metric.Label, additionalLabelName, additionalLabelValue,
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
 	)
 	written += n
 	if err != nil {
@@ -351,7 +444,7 @@
 			return written, err
 		}
 	}
-	if exemplar != nil {
+	if exemplar != nil && len(exemplar.Label) > 0 {
 		n, err = writeExemplar(w, exemplar)
 		written += n
 		if err != nil {
@@ -366,27 +459,58 @@
 	return written, nil
 }
 
-// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
-// in OpenMetrics style.
-func writeOpenMetricsLabelPairs(
+// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
+// formats the float in OpenMetrics style.
+func writeOpenMetricsNameAndLabelPairs(
 	w enhancedWriter,
+	name string,
 	in []*dto.LabelPair,
 	additionalLabelName string, additionalLabelValue float64,
 ) (int, error) {
-	if len(in) == 0 && additionalLabelName == "" {
-		return 0, nil
-	}
 	var (
-		written   int
-		separator byte = '{'
+		written            int
+		separator          byte = '{'
+		metricInsideBraces      = false
 	)
+
+	if name != "" {
+		// If the name does not pass the legacy validity check, we must put the
+		// metric name inside the braces, quoted.
+		if !model.LegacyValidation.IsValidMetricName(name) {
+			metricInsideBraces = true
+			err := w.WriteByte(separator)
+			written++
+			if err != nil {
+				return written, err
+			}
+			separator = ','
+		}
+
+		n, err := writeName(w, name)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+
+	if len(in) == 0 && additionalLabelName == "" {
+		if metricInsideBraces {
+			err := w.WriteByte('}')
+			written++
+			if err != nil {
+				return written, err
+			}
+		}
+		return written, nil
+	}
+
 	for _, lp := range in {
 		err := w.WriteByte(separator)
 		written++
 		if err != nil {
 			return written, err
 		}
-		n, err := w.WriteString(lp.GetName())
+		n, err := writeName(w, lp.GetName())
 		written += n
 		if err != nil {
 			return written, err
@@ -443,6 +567,49 @@
 	return written, nil
 }
 
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following OpenMetrics text format to w, given the metric name, the metric proto
+// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
+// an additional label name with a float64 value (use empty string as label name if
+// not required) and the timestamp that represents the created timestamp.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+	name, suffixToTrim string, metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+
+	// TODO(beorn7): Format this directly from components of ts to
+	// avoid overflow/underflow and precision issues of the float
+	// conversion.
+	n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
 // writeExemplar writes the provided exemplar in OpenMetrics format to w. The
 // function returns the number of bytes written and any error encountered.
 func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
@@ -452,7 +619,7 @@
 	if err != nil {
 		return written, err
 	}
-	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
+	n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
 	written += n
 	if err != nil {
 		return written, err
@@ -473,10 +640,11 @@
 		if err != nil {
 			return written, err
 		}
-		ts, err := ptypes.Timestamp((*e).Timestamp)
+		err = e.Timestamp.CheckValid()
 		if err != nil {
 			return written, err
 		}
+		ts := e.Timestamp.AsTime()
 		// TODO(beorn7): Format this directly from components of ts to
 		// avoid overflow/underflow and precision issues of the float
 		// conversion.
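
A sketch of how a caller might opt in to the new encoder options introduced above; the metric family is made-up sample data:

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("requests"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(42),
				CreatedTimestamp: timestamppb.Now(),
			},
		}},
	}
	// Opt in to the optional `# UNIT` and `_created` lines.
	if _, err := expfmt.MetricFamilyToOpenMetrics(
		os.Stdout, mf, expfmt.WithUnit(), expfmt.WithCreatedLines(),
	); err != nil {
		panic(err)
	}
}
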
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b..c4e9c1b 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,15 +17,14 @@
 	"bufio"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math"
 	"strconv"
 	"strings"
 	"sync"
 
-	"github.com/prometheus/common/model"
-
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/common/model"
 )
 
 // enhancedWriter has all the enhanced write functions needed here. bufio.Writer
@@ -44,7 +43,7 @@
 var (
 	bufPool = sync.Pool{
 		New: func() interface{} {
-			return bufio.NewWriter(ioutil.Discard)
+			return bufio.NewWriter(io.Discard)
 		},
 	}
 	numBufPool = sync.Pool{
@@ -63,6 +62,18 @@
 // contains duplicate metrics or invalid metric or label names, the conversion
 // will result in invalid text format output.
 //
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be returned in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
 // This method fulfills the type 'prometheus.encoder'.
 func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
 	// Fail-fast checks.
@@ -99,7 +110,7 @@
 		if err != nil {
 			return
 		}
-		n, err = w.WriteString(name)
+		n, err = writeName(w, name)
 		written += n
 		if err != nil {
 			return
@@ -125,7 +136,7 @@
 	if err != nil {
 		return
 	}
-	n, err = w.WriteString(name)
+	n, err = writeName(w, name)
 	written += n
 	if err != nil {
 		return
@@ -281,21 +292,9 @@
 	additionalLabelName string, additionalLabelValue float64,
 	value float64,
 ) (int, error) {
-	var written int
-	n, err := w.WriteString(name)
-	written += n
-	if err != nil {
-		return written, err
-	}
-	if suffix != "" {
-		n, err = w.WriteString(suffix)
-		written += n
-		if err != nil {
-			return written, err
-		}
-	}
-	n, err = writeLabelPairs(
-		w, metric.Label, additionalLabelName, additionalLabelValue,
+	written := 0
+	n, err := writeNameAndLabelPairs(
+		w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
 	)
 	written += n
 	if err != nil {
@@ -331,32 +330,64 @@
 	return written, nil
 }
 
-// writeLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'w'. An empty slice in combination with an empty
-// string 'additionalLabelName' results in nothing being written. Otherwise, the
-// label pairs are written, escaped as required by the text format, and enclosed
-// in '{...}'. The function returns the number of bytes written and any error
-// encountered.
-func writeLabelPairs(
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
 	w enhancedWriter,
+	name string,
 	in []*dto.LabelPair,
 	additionalLabelName string, additionalLabelValue float64,
 ) (int, error) {
-	if len(in) == 0 && additionalLabelName == "" {
-		return 0, nil
-	}
 	var (
-		written   int
-		separator byte = '{'
+		written            int
+		separator          byte = '{'
+		metricInsideBraces      = false
 	)
+
+	if name != "" {
+		// If the name does not pass the legacy validity check, we must put the
+		// metric name inside the braces.
+		if !model.LegacyValidation.IsValidMetricName(name) {
+			metricInsideBraces = true
+			err := w.WriteByte(separator)
+			written++
+			if err != nil {
+				return written, err
+			}
+			separator = ','
+		}
+		n, err := writeName(w, name)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+
+	if len(in) == 0 && additionalLabelName == "" {
+		if metricInsideBraces {
+			err := w.WriteByte('}')
+			written++
+			if err != nil {
+				return written, err
+			}
+		}
+		return written, nil
+	}
+
 	for _, lp := range in {
 		err := w.WriteByte(separator)
 		written++
 		if err != nil {
 			return written, err
 		}
-		n, err := w.WriteString(lp.GetName())
+		n, err := writeName(w, lp.GetName())
 		written += n
 		if err != nil {
 			return written, err
@@ -463,3 +494,27 @@
 	numBufPool.Put(bp)
 	return written, err
 }
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+	if model.LegacyValidation.IsValidMetricName(name) {
+		return w.WriteString(name)
+	}
+	var written int
+	var err error
+	err = w.WriteByte('"')
+	written++
+	if err != nil {
+		return written, err
+	}
+	var n int
+	n, err = writeEscapedString(w, name, true)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte('"')
+	written++
+	return written, err
+}
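
A sketch of how MetricFamilyToText now renders names that fail the legacy validity check; the metric family below is invented sample data and the rendering noted in the comment is the expected shape, not verified output:

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// "my.metric" and "host.name" fail the legacy check, so writeName quotes
	// them and the sample should come out roughly as
	// {"my.metric","host.name"="alpha"} 1.
	mf := &dto.MetricFamily{
		Name: proto.String("my.metric"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("host.name"),
				Value: proto.String("alpha"),
			}},
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
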
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b6079b3..8f2edde 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,15 +16,17 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"math"
 	"strconv"
 	"strings"
+	"unicode/utf8"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
 
-	"github.com/golang/protobuf/proto"
 	"github.com/prometheus/common/model"
 )
 
@@ -58,6 +60,7 @@
 	currentMF            *dto.MetricFamily
 	currentMetric        *dto.Metric
 	currentLabelPair     *dto.LabelPair
+	currentLabelPairs    []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
 
 	// The remaining member variables are only used for summaries/histograms.
 	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -72,6 +75,17 @@
 	// count and sum of that summary/histogram.
 	currentIsSummaryCount, currentIsSummarySum     bool
 	currentIsHistogramCount, currentIsHistogramSum bool
+	// These indicate whether the metric name from the current line being parsed is
+	// inside braces and whether that metric name was found, respectively.
+	currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
+	// scheme sets the desired ValidationScheme for names. Defaults to the invalid
+	// UnsetValidation.
+	scheme model.ValidationScheme
+}
+
+// NewTextParser returns a new TextParser with the provided nameValidationScheme.
+func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser {
+	return TextParser{scheme: nameValidationScheme}
 }
 
 // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -112,7 +126,7 @@
 	// stream. Turn this error into something nicer and more
 	// meaningful. (io.EOF is often used as a signal for the legitimate end
 	// of an input stream.)
-	if p.err == io.EOF {
+	if p.err != nil && errors.Is(p.err, io.EOF) {
 		p.parseError("unexpected end of input stream")
 	}
 	return p.metricFamiliesByName, p.err
@@ -120,6 +134,7 @@
 
 func (p *TextParser) reset(in io.Reader) {
 	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+	p.currentLabelPairs = nil
 	if p.buf == nil {
 		p.buf = bufio.NewReader(in)
 	} else {
@@ -135,16 +150,23 @@
 	}
 	p.currentQuantile = math.NaN()
 	p.currentBucket = math.NaN()
+	p.currentMF = nil
 }
 
 // startOfLine represents the state where the next byte read from p.buf is the
 // start of a line (or whitespace leading up to it).
 func (p *TextParser) startOfLine() stateFn {
 	p.lineCount++
+	p.currentMetricIsInsideBraces = false
+	p.currentMetricInsideBracesIsPresent = false
 	if p.skipBlankTab(); p.err != nil {
-		// End of input reached. This is the only case where
-		// that is not an error but a signal that we are done.
-		p.err = nil
+		// This is the only place that we expect to see io.EOF,
+		// which is not an error but the signal that we are done.
+		// Any other error that happens to align with the start of
+		// a line is still an error.
+		if errors.Is(p.err, io.EOF) {
+			p.err = nil
+		}
 		return nil
 	}
 	switch p.currentByte {
@@ -152,6 +174,9 @@
 		return p.startComment
 	case '\n':
 		return p.startOfLine // Empty line, start the next one.
+	case '{':
+		p.currentMetricIsInsideBraces = true
+		return p.readingLabels
 	}
 	return p.readingMetricName
 }
@@ -200,6 +225,9 @@
 		return nil
 	}
 	p.setOrCreateCurrentMF()
+	if p.err != nil {
+		return nil
+	}
 	if p.skipBlankTab(); p.err != nil {
 		return nil // Unexpected end of input.
 	}
@@ -228,6 +256,9 @@
 		return nil
 	}
 	p.setOrCreateCurrentMF()
+	if p.err != nil {
+		return nil
+	}
 	// Now is the time to fix the type if it hasn't happened yet.
 	if p.currentMF.Type == nil {
 		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
@@ -269,6 +300,8 @@
 		return nil // Unexpected end of input.
 	}
 	if p.currentByte == '}' {
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
@@ -281,34 +314,79 @@
 		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
 		return nil
 	}
-	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
-	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
-		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
-		return nil
-	}
-	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
-	// labels to 'real' labels.
-	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
-		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
-		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
-	}
 	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
 		return nil // Unexpected end of input.
 	}
 	if p.currentByte != '=' {
+		if p.currentMetricIsInsideBraces {
+			if p.currentMetricInsideBracesIsPresent {
+				p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+				return nil
+			}
+			switch p.currentByte {
+			case ',':
+				p.setOrCreateCurrentMF()
+				if p.err != nil {
+					return nil
+				}
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetricInsideBracesIsPresent = true
+				return p.startLabelName
+			case '}':
+				p.setOrCreateCurrentMF()
+				if p.err != nil {
+					p.currentLabelPairs = nil
+					return nil
+				}
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+				p.currentLabelPairs = nil
+				if p.skipBlankTab(); p.err != nil {
+					return nil // Unexpected end of input.
+				}
+				return p.readingValue
+			default:
+				p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+				return nil
+			}
+		}
 		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+		p.currentLabelPairs = nil
 		return nil
 	}
+	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) {
+		p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName()))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
+	// labels to 'real' labels.
+	if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+		(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
+		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
+	}
 	// Check for duplicate label names.
 	labels := make(map[string]struct{})
-	for _, l := range p.currentMetric.Label {
+	for _, l := range p.currentLabelPairs {
 		lName := l.GetName()
-		if _, exists := labels[lName]; !exists {
-			labels[lName] = struct{}{}
-		} else {
+		if _, exists := labels[lName]; exists {
 			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+			p.currentLabelPairs = nil
 			return nil
 		}
+		labels[lName] = struct{}{}
 	}
 	return p.startLabelValue
 }
@@ -339,6 +417,7 @@
 			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 				// Create a more helpful error message.
 				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+				p.currentLabelPairs = nil
 				return nil
 			}
 		} else {
@@ -365,12 +444,19 @@
 		return p.startLabelName
 
 	case '}':
+		if p.currentMF == nil {
+			p.parseError("invalid metric name")
+			return nil
+		}
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
 		return p.readingValue
 	default:
 		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+		p.currentLabelPairs = nil
 		return nil
 	}
 }
@@ -381,7 +467,8 @@
 	// When we are here, we have read all the labels, so for the
 	// special case of a summary/histogram, we can finally find out
 	// if the metric already exists.
-	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+	switch p.currentMF.GetType() {
+	case dto.MetricType_SUMMARY:
 		signature := model.LabelsToSignature(p.currentLabels)
 		if summary := p.summaries[signature]; summary != nil {
 			p.currentMetric = summary
@@ -389,7 +476,7 @@
 			p.summaries[signature] = p.currentMetric
 			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
 		}
-	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+	case dto.MetricType_HISTOGRAM:
 		signature := model.LabelsToSignature(p.currentLabels)
 		if histogram := p.histograms[signature]; histogram != nil {
 			p.currentMetric = histogram
@@ -397,7 +484,7 @@
 			p.histograms[signature] = p.currentMetric
 			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
 		}
-	} else {
+	default:
 		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
 	}
 	if p.readTokenUntilWhitespace(); p.err != nil {
@@ -579,6 +666,8 @@
 				p.currentToken.WriteByte(p.currentByte)
 			case 'n':
 				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
 			default:
 				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
 				return
@@ -604,13 +693,45 @@
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsMetricName() {
 	p.currentToken.Reset()
+	// A UTF-8 metric name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
 	if !isValidMetricNameStart(p.currentByte) {
 		return
 	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
 		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+		if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
 			return
 		}
 	}
@@ -622,13 +743,45 @@
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsLabelName() {
 	p.currentToken.Reset()
+	// A UTF-8 label name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
 	if !isValidLabelNameStart(p.currentByte) {
 		return
 	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
 		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+		if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
 			return
 		}
 	}
@@ -654,6 +807,7 @@
 				p.currentToken.WriteByte('\n')
 			default:
 				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				p.currentLabelPairs = nil
 				return
 			}
 			escaped = false
@@ -679,6 +833,10 @@
 	p.currentIsHistogramCount = false
 	p.currentIsHistogramSum = false
 	name := p.currentToken.String()
+	if !p.scheme.IsValidMetricName(name) {
+		p.parseError(fmt.Sprintf("invalid metric name %q", name))
+		return
+	}
 	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
 		return
 	}
@@ -712,19 +870,19 @@
 }
 
 func isValidLabelNameStart(b byte) bool {
-	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
 }
 
-func isValidLabelNameContinuation(b byte) bool {
-	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
 }
 
 func isValidMetricNameStart(b byte) bool {
 	return isValidLabelNameStart(b) || b == ':'
 }
 
-func isValidMetricNameContinuation(b byte) bool {
-	return isValidLabelNameContinuation(b) || b == ':'
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameContinuation(b, quoted) || b == ':'
 }
 
 func isBlankOrTab(b byte) bool {
@@ -769,7 +927,7 @@
 
 func parseFloat(s string) (float64, error) {
 	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
+		return 0, errors.New("unsupported character in float")
 	}
 	return strconv.ParseFloat(s, 64)
 }
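
Since the zero-value TextParser now carries the invalid UnsetValidation scheme, callers will generally want to construct parsers via NewTextParser; a minimal sketch with made-up input:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	input := "# TYPE http_requests_total counter\nhttp_requests_total{code=\"200\"} 7\n"

	// The scheme decides which metric and label names are accepted; pass
	// model.LegacyValidation to restrict names to the classic character set.
	parser := expfmt.NewTextParser(model.UTF8Validation)
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, len(mf.Metric))
	}
}
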
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
deleted file mode 100644
index 7723656..0000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-PACKAGE
-
-package goautoneg
-import "bitbucket.org/ww/goautoneg"
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-    Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in
-    the documentation and/or other materials provided with the
-    distribution.
-
-    Neither the name of the Open Knowledge Foundation Ltd. nor the
-    names of its contributors may be used to endorse or promote
-    products derived from this software without specific prior written
-    permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-FUNCTIONS
-
-func Negotiate(header string, alternatives []string) (content_type string)
-Negotiate the most appropriate content_type given the accept header
-and a list of alternatives.
-
-func ParseAccept(header string) (accept []Accept)
-Parse an Accept Header string returning a sorted list
-of clauses
-
-
-TYPES
-
-type Accept struct {
-    Type, SubType string
-    Q             float32
-    Params        map[string]string
-}
-Structure to represent a clause in an HTTP Accept Header
-
-
-SUBDIRECTORIES
-
-	.hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
deleted file mode 100644
index 26e9228..0000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-    Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in
-    the documentation and/or other materials provided with the
-    distribution.
-
-    Neither the name of the Open Knowledge Foundation Ltd. nor the
-    names of its contributors may be used to endorse or promote
-    products derived from this software without specific prior written
-    permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-*/
-package goautoneg
-
-import (
-	"sort"
-	"strconv"
-	"strings"
-)
-
-// Structure to represent a clause in an HTTP Accept Header
-type Accept struct {
-	Type, SubType string
-	Q             float64
-	Params        map[string]string
-}
-
-// For internal use, so that we can use the sort interface
-type accept_slice []Accept
-
-func (accept accept_slice) Len() int {
-	slice := []Accept(accept)
-	return len(slice)
-}
-
-func (accept accept_slice) Less(i, j int) bool {
-	slice := []Accept(accept)
-	ai, aj := slice[i], slice[j]
-	if ai.Q > aj.Q {
-		return true
-	}
-	if ai.Type != "*" && aj.Type == "*" {
-		return true
-	}
-	if ai.SubType != "*" && aj.SubType == "*" {
-		return true
-	}
-	return false
-}
-
-func (accept accept_slice) Swap(i, j int) {
-	slice := []Accept(accept)
-	slice[i], slice[j] = slice[j], slice[i]
-}
-
-// Parse an Accept Header string returning a sorted list
-// of clauses
-func ParseAccept(header string) (accept []Accept) {
-	parts := strings.Split(header, ",")
-	accept = make([]Accept, 0, len(parts))
-	for _, part := range parts {
-		part := strings.Trim(part, " ")
-
-		a := Accept{}
-		a.Params = make(map[string]string)
-		a.Q = 1.0
-
-		mrp := strings.Split(part, ";")
-
-		media_range := mrp[0]
-		sp := strings.Split(media_range, "/")
-		a.Type = strings.Trim(sp[0], " ")
-
-		switch {
-		case len(sp) == 1 && a.Type == "*":
-			a.SubType = "*"
-		case len(sp) == 2:
-			a.SubType = strings.Trim(sp[1], " ")
-		default:
-			continue
-		}
-
-		if len(mrp) == 1 {
-			accept = append(accept, a)
-			continue
-		}
-
-		for _, param := range mrp[1:] {
-			sp := strings.SplitN(param, "=", 2)
-			if len(sp) != 2 {
-				continue
-			}
-			token := strings.Trim(sp[0], " ")
-			if token == "q" {
-				a.Q, _ = strconv.ParseFloat(sp[1], 32)
-			} else {
-				a.Params[token] = strings.Trim(sp[1], " ")
-			}
-		}
-
-		accept = append(accept, a)
-	}
-
-	slice := accept_slice(accept)
-	sort.Sort(slice)
-
-	return
-}
-
-// Negotiate the most appropriate content_type given the accept header
-// and a list of alternatives.
-func Negotiate(header string, alternatives []string) (content_type string) {
-	asp := make([][]string, 0, len(alternatives))
-	for _, ctype := range alternatives {
-		asp = append(asp, strings.SplitN(ctype, "/", 2))
-	}
-	for _, clause := range ParseAccept(header) {
-		for i, ctsp := range asp {
-			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
-				content_type = alternatives[i]
-				return
-			}
-			if clause.Type == ctsp[0] && clause.SubType == "*" {
-				content_type = alternatives[i]
-				return
-			}
-			if clause.Type == "*" && clause.SubType == "*" {
-				content_type = alternatives[i]
-				return
-			}
-		}
-	}
-	return
-}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c..460f554 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -14,6 +14,7 @@
 package model
 
 import (
+	"errors"
 	"fmt"
 	"time"
 )
@@ -64,7 +65,7 @@
 	return a.ResolvedAt(time.Now())
 }
 
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
 // the given timestamp.
 func (a *Alert) ResolvedAt(ts time.Time) bool {
 	if a.EndsAt.IsZero() {
@@ -75,7 +76,12 @@
 
 // Status returns the status of the alert.
 func (a *Alert) Status() AlertStatus {
-	if a.Resolved() {
+	return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+	if a.ResolvedAt(ts) {
 		return AlertResolved
 	}
 	return AlertFiring
@@ -84,19 +90,19 @@
 // Validate checks whether the alert data is inconsistent.
 func (a *Alert) Validate() error {
 	if a.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
+		return errors.New("start time missing")
 	}
 	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
+		return errors.New("start time must be before end time")
 	}
 	if err := a.Labels.Validate(); err != nil {
-		return fmt.Errorf("invalid label set: %s", err)
+		return fmt.Errorf("invalid label set: %w", err)
 	}
 	if len(a.Labels) == 0 {
-		return fmt.Errorf("at least one label pair required")
+		return errors.New("at least one label pair required")
 	}
 	if err := a.Annotations.Validate(); err != nil {
-		return fmt.Errorf("invalid annotations: %s", err)
+		return fmt.Errorf("invalid annotations: %w", err)
 	}
 	return nil
 }
@@ -127,6 +133,17 @@
 	return false
 }
 
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+	for _, a := range as {
+		if !a.ResolvedAt(ts) {
+			return true
+		}
+	}
+	return false
+}
+
 // Status returns StatusFiring iff at least one of the alerts is firing.
 func (as Alerts) Status() AlertStatus {
 	if as.HasFiring() {
@@ -134,3 +151,12 @@
 	}
 	return AlertResolved
 }
+
+// StatusAt returns StatusFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+	if as.HasFiringAt(ts) {
+		return AlertFiring
+	}
+	return AlertResolved
+}
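
A small sketch of the new timestamp-based helpers; the alert data below is invented:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	alerts := model.Alerts{
		&model.Alert{
			Labels:   model.LabelSet{"alertname": "DiskFull"},
			StartsAt: now.Add(-time.Hour),
			EndsAt:   now.Add(-time.Minute), // already resolved
		},
		&model.Alert{
			Labels:   model.LabelSet{"alertname": "HighLatency"},
			StartsAt: now.Add(-time.Minute), // still firing: zero EndsAt
		},
	}
	// StatusAt and HasFiringAt evaluate against an explicit timestamp
	// instead of time.Now().
	fmt.Println(alerts.StatusAt(now))                              // firing
	fmt.Println(alerts[0].StatusAt(now), alerts[1].StatusAt(now))  // resolved firing
}
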
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index ef89563..dfeb34b 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@
 )
 
 const (
-	// AlertNameLabel is the name of the label containing the an alert's name.
+	// AlertNameLabel is the name of the label containing the alert's name.
 	AlertNameLabel = "alertname"
 
 	// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -32,6 +32,12 @@
 	// MetricNameLabel is the label name indicating the metric name of a
 	// timeseries.
 	MetricNameLabel = "__name__"
+	// MetricTypeLabel is the label name indicating the metric type of a
+	// timeseries, as per the PROM-39 proposal.
+	MetricTypeLabel = "__type__"
+	// MetricUnitLabel is the label name indicating the metric unit of a
+	// timeseries, as per the PROM-39 proposal.
+	MetricUnitLabel = "__unit__"
 
 	// SchemeLabel is the name of the label that holds the scheme on which to
 	// scrape a target.
@@ -97,19 +103,24 @@
 // therewith.
 type LabelName string
 
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or is valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [ValidationScheme.IsValidLabelName] instead.
 func (ln LabelName) IsValid() bool {
-	if len(ln) == 0 {
-		return false
-	}
-	for i, b := range ln {
-		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-			return false
-		}
-	}
-	return true
+	return NameValidationScheme.IsValidLabelName(string(ln))
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [LegacyValidation.IsValidLabelName] instead.
+func (ln LabelName) IsValidLegacy() bool {
+	return LegacyValidation.IsValidLabelName(string(ln))
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -164,7 +175,7 @@
 // A LabelValue is an associated value for a LabelName.
 type LabelValue string
 
-// IsValid returns true iff the string is a valid UTF8.
+// IsValid returns true iff the string is valid UTF-8.
 func (lv LabelValue) IsValid() bool {
 	return utf8.ValidString(string(lv))
 }
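
A sketch of the ValidationScheme methods that the deprecated helpers now delegate to; the names checked below are arbitrary examples:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// The scheme methods replace the deprecated package-level helpers.
	fmt.Println(model.LegacyValidation.IsValidLabelName("http.status")) // false
	fmt.Println(model.UTF8Validation.IsValidLabelName("http.status"))   // true
	fmt.Println(model.LegacyValidation.IsValidMetricName("my_metric"))  // true

	// The deprecated LabelName.IsValid() now defers to the global
	// NameValidationScheme, which defaults to UTF8Validation.
	fmt.Println(model.LabelName("http.status").IsValid()) // true
}
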
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index 6eda08a..9de47b2 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -17,7 +17,6 @@
 	"encoding/json"
 	"fmt"
 	"sort"
-	"strings"
 )
 
 // A LabelSet is a collection of LabelName and LabelValue pairs.  The LabelSet
@@ -115,10 +114,10 @@
 }
 
 // Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
-	result := make(LabelSet, len(l))
+func (ls LabelSet) Merge(other LabelSet) LabelSet {
+	result := make(LabelSet, len(ls))
 
-	for k, v := range l {
+	for k, v := range ls {
 		result[k] = v
 	}
 
@@ -129,16 +128,6 @@
 	return result
 }
 
-func (l LabelSet) String() string {
-	lstrs := make([]string, 0, len(l))
-	for l, v := range l {
-		lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
-	}
-
-	sort.Strings(lstrs)
-	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
-
 // Fingerprint returns the LabelSet's fingerprint.
 func (ls LabelSet) Fingerprint() Fingerprint {
 	return labelSetToFingerprint(ls)
@@ -151,7 +140,7 @@
 }
 
 // UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
 	var m map[LabelName]LabelValue
 	if err := json.Unmarshal(b, &m); err != nil {
 		return err
@@ -164,6 +153,6 @@
 			return fmt.Errorf("%q is not a valid label name", ln)
 		}
 	}
-	*l = LabelSet(m)
+	*ls = LabelSet(m)
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 0000000..abb2c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"bytes"
+	"slices"
+	"strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+	var lna [32]string // On stack to avoid memory allocation for sorting names.
+	labelNames := lna[:0]
+	for name := range l {
+		labelNames = append(labelNames, string(name))
+	}
+	slices.Sort(labelNames)
+	var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+	b := bytes.NewBuffer(bytea[:0])
+	b.WriteByte('{')
+	for i, name := range labelNames {
+		if i > 0 {
+			b.WriteString(", ")
+		}
+		b.WriteString(name)
+		b.WriteByte('=')
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+	}
+	b.WriteByte('}')
+	return b.String()
+}
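
A tiny sketch of the relocated String method, using an arbitrary label set:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{
		"more": "less",
		"foo":  "bar",
	}
	// Names come out sorted and values quoted: {foo="bar", more="less"}
	fmt.Println(ls)
}
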
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000..447ab8a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7..3feebf3 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,19 +14,242 @@
 package model
 
 import (
+	"encoding/json"
+	"errors"
 	"fmt"
 	"regexp"
 	"sort"
+	"strconv"
 	"strings"
+	"unicode/utf8"
+
+	dto "github.com/prometheus/client_model/go"
+	"go.yaml.in/yaml/v2"
+	"google.golang.org/protobuf/proto"
 )
 
 var (
-	// MetricNameRE is a regular expression matching valid metric
-	// names. Note that the IsValidMetricName function performs the same
-	// check but faster than a match with this regular expression.
-	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+	// NameValidationScheme determines the global default method of name
+	// validation to be used by all calls to IsValidMetricName() and
+	// LabelName.IsValid().
+	//
+	// Deprecated: This variable should not be used and might be removed in the
+	// far future. If you wish to stick to the legacy name validation, use the
+	// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+	// instead. This variable exists as an escape hatch for emergency cases,
+	// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.
+	// to postpone UTF-8 migrations or to aid in debugging unforeseen results of
+	// the change. In such a case, a temporary assignment to `LegacyValidation`
+	// in the `init()` function of your main.go, or similar, could be considered.
+	//
+	// Historically, we opted for a global variable to feature-gate different
+	// validation schemes in operations that were not otherwise easily adjustable
+	// (e.g. Labels YAML unmarshaling). That may have been a mistake; a separate
+	// Labels structure or package might have been a better choice. Given that the
+	// change was made and many have already upgraded common, we leave this as-is,
+	// with this warning, as a learning for the future.
+	NameValidationScheme = UTF8Validation
+
+	// NameEscapingScheme defines the default way that names will be escaped when
+	// presented to systems that do not support UTF-8 names. If the Content-Type
+	// "escaping" term is specified, that will override this value.
+	// NameEscapingScheme should not be set to the NoEscaping value. That string
+	// is used in content negotiation to indicate that a system supports UTF-8 and
+	// has that feature enabled.
+	NameEscapingScheme = UnderscoreEscaping
 )
 
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+	// UnsetValidation represents an undefined ValidationScheme.
+	// Should not be used in practice.
+	UnsetValidation ValidationScheme = iota
+
+	// LegacyValidation is a setting that requires that all metric and label names
+	// conform to the original Prometheus character requirements described by
+	// MetricNameRE and LabelNameRE.
+	LegacyValidation
+
+	// UTF8Validation only requires that metric and label names be valid UTF-8
+	// strings.
+	UTF8Validation
+)
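
For illustration only (not part of the vendored patch): a minimal sketch of how the two active schemes differ, assuming the vendored import path `github.com/prometheus/common/model` and the `IsValidMetricName`/`IsValidLabelName` methods defined further down in this file.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Valid UTF-8, but '.' is not a legacy-valid character.
	name := "http.requests.total"

	fmt.Println(model.LegacyValidation.IsValidMetricName(name)) // false
	fmt.Println(model.UTF8Validation.IsValidMetricName(name))   // true

	// A plain label name is legal under both schemes.
	fmt.Println(model.LegacyValidation.IsValidLabelName("le")) // true
}
```
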
+
+var _ interface {
+	yaml.Marshaler
+	yaml.Unmarshaler
+	json.Marshaler
+	json.Unmarshaler
+	fmt.Stringer
+} = new(ValidationScheme)
+
+// String returns the string representation of s.
+func (s ValidationScheme) String() string {
+	switch s {
+	case UnsetValidation:
+		return "unset"
+	case LegacyValidation:
+		return "legacy"
+	case UTF8Validation:
+		return "utf8"
+	default:
+		panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+	}
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (s ValidationScheme) MarshalYAML() (any, error) {
+	switch s {
+	case UnsetValidation:
+		return "", nil
+	case LegacyValidation, UTF8Validation:
+		return s.String(), nil
+	default:
+		panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+	}
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error {
+	var scheme string
+	if err := unmarshal(&scheme); err != nil {
+		return err
+	}
+	return s.Set(scheme)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s ValidationScheme) MarshalJSON() ([]byte, error) {
+	switch s {
+	case UnsetValidation:
+		return json.Marshal("")
+	case UTF8Validation, LegacyValidation:
+		return json.Marshal(s.String())
+	default:
+		return nil, fmt.Errorf("unhandled ValidationScheme: %d", s)
+	}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error {
+	var repr string
+	if err := json.Unmarshal(bytes, &repr); err != nil {
+		return err
+	}
+	return s.Set(repr)
+}
+
+// Set implements the pflag.Value interface.
+func (s *ValidationScheme) Set(text string) error {
+	switch text {
+	case "":
+		// Don't change the value.
+	case LegacyValidation.String():
+		*s = LegacyValidation
+	case UTF8Validation.String():
+		*s = UTF8Validation
+	default:
+		return fmt.Errorf("unrecognized ValidationScheme: %q", text)
+	}
+	return nil
+}
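
A short, non-authoritative sketch of the JSON round trip provided by the marshaler/unmarshaler implementations above; the outputs in the comments follow from `String()` and `Set()` and are indicative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Marshal: a scheme serializes to its string form.
	b, err := json.Marshal(model.LegacyValidation)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "legacy"

	// Unmarshal: "utf8" selects UTF8Validation; an empty string leaves the value unchanged.
	var s model.ValidationScheme
	if err := json.Unmarshal([]byte(`"utf8"`), &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // utf8
}
```
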
+
+// IsValidMetricName returns whether metricName is valid according to s.
+func (s ValidationScheme) IsValidMetricName(metricName string) bool {
+	switch s {
+	case LegacyValidation:
+		if len(metricName) == 0 {
+			return false
+		}
+		for i, b := range metricName {
+			if !isValidLegacyRune(b, i) {
+				return false
+			}
+		}
+		return true
+	case UTF8Validation:
+		if len(metricName) == 0 {
+			return false
+		}
+		return utf8.ValidString(metricName)
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String()))
+	}
+}
+
+// IsValidLabelName returns whether labelName is valid according to s.
+func (s ValidationScheme) IsValidLabelName(labelName string) bool {
+	switch s {
+	case LegacyValidation:
+		if len(labelName) == 0 {
+			return false
+		}
+		for i, b := range labelName {
+			// TODO: Apply De Morgan's law. Make sure there are tests for this.
+			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
+				return false
+			}
+		}
+		return true
+	case UTF8Validation:
+		if len(labelName) == 0 {
+			return false
+		}
+		return utf8.ValidString(labelName)
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s))
+	}
+}
+
+// Type implements the pflag.Value interface.
+func (ValidationScheme) Type() string {
+	return "validationScheme"
+}
+
+type EscapingScheme int
+
+const (
+	// NoEscaping indicates that a name will not be escaped. Unescaped names that
+	// do not conform to the legacy validity check will use a new exposition
+	// format syntax that will be officially standardized in future versions.
+	NoEscaping EscapingScheme = iota
+
+	// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+	UnderscoreEscaping
+
+	// DotsEscaping is similar to UnderscoreEscaping, except that dots are
+	// converted to `_dot_` and pre-existing underscores are converted to `__`.
+	DotsEscaping
+
+	// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+	// characters with the unicode value, surrounded by underscores. Single
+	// underscores are replaced with double underscores.
+	ValueEncodingEscaping
+)
+
+const (
+	// EscapingKey is the key in an Accept or Content-Type header that defines how
+	// metric and label names that do not conform to the legacy character
+	// requirements should be escaped when being scraped by a legacy prometheus
+	// system. If a system does not explicitly pass an escaping parameter in the
+	// Accept header, the default NameEscapingScheme will be used.
+	EscapingKey = "escaping"
+
+	// Possible values for Escaping Key.
+	AllowUTF8         = "allow-utf-8" // No escaping required.
+	EscapeUnderscores = "underscores"
+	EscapeDots        = "dots"
+	EscapeValues      = "values"
+)
+
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
 // a singleton and refers to one and only one stream of samples.
 type Metric LabelSet
@@ -86,17 +309,285 @@
 	return LabelSet(m).FastFingerprint()
 }
 
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// when the legacy validation scheme is selected, or iff it is valid UTF-8 when
+// the UTF8Validation scheme is selected.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [ValidationScheme.IsValidMetricName] instead.
+func IsValidMetricName(n LabelValue) bool {
+	return NameValidationScheme.IsValidMetricName(string(n))
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
 // This function, however, does not use MetricNameRE for the check but a much
 // faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
-	if len(n) == 0 {
-		return false
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [LegacyValidation.IsValidMetricName] instead.
+func IsValidLegacyMetricName(n string) bool {
+	return LegacyValidation.IsValidMetricName(n)
+}
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+	if v == nil {
+		return nil
 	}
-	for i, b := range n {
-		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
-			return false
+
+	if scheme == NoEscaping {
+		return v
+	}
+
+	out := &dto.MetricFamily{
+		Help: v.Help,
+		Type: v.Type,
+		Unit: v.Unit,
+	}
+
+	// If the name is nil, copy as-is, don't try to escape.
+	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
+		out.Name = v.Name
+	} else {
+		out.Name = proto.String(EscapeName(v.GetName(), scheme))
+	}
+	for _, m := range v.Metric {
+		if !metricNeedsEscaping(m) {
+			out.Metric = append(out.Metric, m)
+			continue
+		}
+
+		escaped := &dto.Metric{
+			Gauge:       m.Gauge,
+			Counter:     m.Counter,
+			Summary:     m.Summary,
+			Untyped:     m.Untyped,
+			Histogram:   m.Histogram,
+			TimestampMs: m.TimestampMs,
+		}
+
+		for _, l := range m.Label {
+			if l.GetName() == MetricNameLabel {
+				if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
+					escaped.Label = append(escaped.Label, l)
+					continue
+				}
+				escaped.Label = append(escaped.Label, &dto.LabelPair{
+					Name:  proto.String(MetricNameLabel),
+					Value: proto.String(EscapeName(l.GetValue(), scheme)),
+				})
+				continue
+			}
+			if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
+				escaped.Label = append(escaped.Label, l)
+				continue
+			}
+			escaped.Label = append(escaped.Label, &dto.LabelPair{
+				Name:  proto.String(EscapeName(l.GetName(), scheme)),
+				Value: l.Value,
+			})
+		}
+		out.Metric = append(out.Metric, escaped)
+	}
+	return out
+}
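
A minimal, hedged usage sketch of `EscapeMetricFamily`, assuming the `dto` and `proto` packages imported at the top of this file; it illustrates that the input family is not mutated.

```go
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/model"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("service.latency.seconds"),
		Type: dto.MetricType_GAUGE.Enum(),
	}

	escaped := model.EscapeMetricFamily(mf, model.UnderscoreEscaping)

	fmt.Println(escaped.GetName()) // service_latency_seconds
	fmt.Println(mf.GetName())      // service.latency.seconds -- the input is left untouched
}
```
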
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+	for _, l := range m.Label {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
+			return true
+		}
+		if !IsValidLegacyMetricName(l.GetName()) {
+			return true
 		}
 	}
-	return true
+	return false
+}
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned. (Especially NoEscaping, which by definition is a
+// noop). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	var escaped strings.Builder
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		for i, b := range name {
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	case DotsEscaping:
+		// Do not early return for legacy valid names, we still escape underscores.
+		for i, b := range name {
+			switch {
+			case b == '_':
+				escaped.WriteString("__")
+			case b == '.':
+				escaped.WriteString("_dot_")
+			case isValidLegacyRune(b, i):
+				escaped.WriteRune(b)
+			default:
+				escaped.WriteString("__")
+			}
+		}
+		return escaped.String()
+	case ValueEncodingEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		escaped.WriteString("U__")
+		for i, b := range name {
+			switch {
+			case b == '_':
+				escaped.WriteString("__")
+			case isValidLegacyRune(b, i):
+				escaped.WriteRune(b)
+			case !utf8.ValidRune(b):
+				escaped.WriteString("_FFFD_")
+			default:
+				escaped.WriteRune('_')
+				escaped.WriteString(strconv.FormatInt(int64(b), 16))
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
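
A hedged example of what each escaping scheme produces for a UTF-8 metric name, derived from the switch above; the values in the comments are illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "request.latency.seconds"

	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))    // request_latency_seconds
	fmt.Println(model.EscapeName(name, model.DotsEscaping))          // request_dot_latency_dot_seconds
	fmt.Println(model.EscapeName(name, model.ValueEncodingEscaping)) // U__request_2e_latency_2e_seconds
}
```
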
+
+// lower function taken from strconv.atoi.
+func lower(c byte) byte {
+	return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-round-trippable.
+// If any error is encountered, the original input is returned.
+func UnescapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		// It is not possible to unescape from underscore replacement.
+		return name
+	case DotsEscaping:
+		name = strings.ReplaceAll(name, "_dot_", ".")
+		name = strings.ReplaceAll(name, "__", "_")
+		return name
+	case ValueEncodingEscaping:
+		escapedName, found := strings.CutPrefix(name, "U__")
+		if !found {
+			return name
+		}
+
+		var unescaped strings.Builder
+	TOP:
+		for i := 0; i < len(escapedName); i++ {
+			// All non-underscores are treated normally.
+			if escapedName[i] != '_' {
+				unescaped.WriteByte(escapedName[i])
+				continue
+			}
+			i++
+			if i >= len(escapedName) {
+				return name
+			}
+			// A double underscore is a single underscore.
+			if escapedName[i] == '_' {
+				unescaped.WriteByte('_')
+				continue
+			}
+			// We think we are in a UTF-8 code, process it.
+			var utf8Val uint
+			for j := 0; i < len(escapedName); j++ {
+				// This is too many characters for a utf8 value based on the MaxRune
+				// value of '\U0010FFFF'.
+				if j >= 6 {
+					return name
+				}
+				// Found a closing underscore, convert to a rune, check validity, and append.
+				if escapedName[i] == '_' {
+					utf8Rune := rune(utf8Val)
+					if !utf8.ValidRune(utf8Rune) {
+						return name
+					}
+					unescaped.WriteRune(utf8Rune)
+					continue TOP
+				}
+				r := lower(escapedName[i])
+				utf8Val *= 16
+				switch {
+				case r >= '0' && r <= '9':
+					utf8Val += uint(r) - '0'
+				case r >= 'a' && r <= 'f':
+					utf8Val += uint(r) - 'a' + 10
+				default:
+					return name
+				}
+				i++
+			}
+			// Didn't find closing underscore, invalid.
+			return name
+		}
+		return unescaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
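
A small sketch contrasting a reversible scheme with a lossy one, per the comments above; assumes the same vendored `model` package.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	orig := "jvm.memory.used"

	// ValueEncodingEscaping round-trips.
	enc := model.EscapeName(orig, model.ValueEncodingEscaping)
	fmt.Println(enc)                                                  // U__jvm_2e_memory_2e_used
	fmt.Println(model.UnescapeName(enc, model.ValueEncodingEscaping)) // jvm.memory.used

	// UnderscoreEscaping is lossy: UnescapeName returns its input unchanged.
	under := model.EscapeName(orig, model.UnderscoreEscaping)
	fmt.Println(model.UnescapeName(under, model.UnderscoreEscaping)) // jvm_memory_used
}
```
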
+
+func isValidLegacyRune(b rune, i int) bool {
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+	switch e {
+	case NoEscaping:
+		return AllowUTF8
+	case UnderscoreEscaping:
+		return EscapeUnderscores
+	case DotsEscaping:
+		return EscapeDots
+	case ValueEncodingEscaping:
+		return EscapeValues
+	default:
+		panic(fmt.Sprintf("unknown format scheme %d", e))
+	}
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+	if s == "" {
+		return NoEscaping, errors.New("got empty string instead of escaping scheme")
+	}
+	switch s {
+	case AllowUTF8:
+		return NoEscaping, nil
+	case EscapeUnderscores:
+		return UnderscoreEscaping, nil
+	case EscapeDots:
+		return DotsEscaping, nil
+	case EscapeValues:
+		return ValueEncodingEscaping, nil
+	default:
+		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+	}
 }
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13..dc8a002 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@
 // when calculating their combined hash value (aka signature aka fingerprint).
 const SeparatorByte byte = 255
 
-var (
-	// cache the signature of an empty label set.
-	emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
 
 // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
 // given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889..8f91a97 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,6 +15,7 @@
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"regexp"
 	"time"
@@ -34,7 +35,7 @@
 	}
 
 	if len(m.Name) == 0 {
-		return fmt.Errorf("label name in matcher must not be empty")
+		return errors.New("label name in matcher must not be empty")
 	}
 	if m.IsRegex {
 		if _, err := regexp.Compile(m.Value); err != nil {
@@ -77,30 +78,30 @@
 // Validate returns true iff all fields of the silence have valid values.
 func (s *Silence) Validate() error {
 	if len(s.Matchers) == 0 {
-		return fmt.Errorf("at least one matcher required")
+		return errors.New("at least one matcher required")
 	}
 	for _, m := range s.Matchers {
 		if err := m.Validate(); err != nil {
-			return fmt.Errorf("invalid matcher: %s", err)
+			return fmt.Errorf("invalid matcher: %w", err)
 		}
 	}
 	if s.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
+		return errors.New("start time missing")
 	}
 	if s.EndsAt.IsZero() {
-		return fmt.Errorf("end time missing")
+		return errors.New("end time missing")
 	}
 	if s.EndsAt.Before(s.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
+		return errors.New("start time must be before end time")
 	}
 	if s.CreatedBy == "" {
-		return fmt.Errorf("creator information missing")
+		return errors.New("creator information missing")
 	}
 	if s.Comment == "" {
-		return fmt.Errorf("comment missing")
+		return errors.New("comment missing")
 	}
 	if s.CreatedAt.IsZero() {
-		return fmt.Errorf("creation timestamp missing")
+		return errors.New("creation timestamp missing")
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 7f67b16..1730b0f 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@
 	"errors"
 	"fmt"
 	"math"
-	"regexp"
 	"strconv"
 	"strings"
 	"time"
@@ -127,14 +126,14 @@
 	p := strings.Split(string(b), ".")
 	switch len(p) {
 	case 1:
-		v, err := strconv.ParseInt(string(p[0]), 10, 64)
+		v, err := strconv.ParseInt(p[0], 10, 64)
 		if err != nil {
 			return err
 		}
 		*t = Time(v * second)
 
 	case 2:
-		v, err := strconv.ParseInt(string(p[0]), 10, 64)
+		v, err := strconv.ParseInt(p[0], 10, 64)
 		if err != nil {
 			return err
 		}
@@ -144,7 +143,7 @@
 		if prec < 0 {
 			p[1] = p[1][:dotPrecision]
 		} else if prec > 0 {
-			p[1] = p[1] + strings.Repeat("0", prec)
+			p[1] += strings.Repeat("0", prec)
 		}
 
 		va, err := strconv.ParseInt(p[1], 10, 32)
@@ -171,77 +170,120 @@
 // This type should not propagate beyond the scope of input/output processing.
 type Duration time.Duration
 
-// Set implements pflag/flag.Value
+// Set implements pflag/flag.Value.
 func (d *Duration) Set(s string) error {
 	var err error
 	*d, err = ParseDuration(s)
 	return err
 }
 
-// Type implements pflag.Value
-func (d *Duration) Type() string {
+// Type implements pflag.Value.
+func (*Duration) Type() string {
 	return "duration"
 }
 
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+	pos  int
+	mult uint64
+}{
+	"ms": {7, uint64(time.Millisecond)},
+	"s":  {6, uint64(time.Second)},
+	"m":  {5, uint64(time.Minute)},
+	"h":  {4, uint64(time.Hour)},
+	"d":  {3, uint64(24 * time.Hour)},
+	"w":  {2, uint64(7 * 24 * time.Hour)},
+	"y":  {1, uint64(365 * 24 * time.Hour)},
+}
 
 // ParseDuration parses a string into a time.Duration, assuming that a year
 // always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
-	switch durationStr {
+// Negative durations are not supported.
+func ParseDuration(s string) (Duration, error) {
+	switch s {
 	case "0":
 		// Allow 0 without a unit.
 		return 0, nil
 	case "":
-		return 0, fmt.Errorf("empty duration string")
+		return 0, errors.New("empty duration string")
 	}
-	matches := durationRE.FindStringSubmatch(durationStr)
-	if matches == nil {
-		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
-	}
-	var dur time.Duration
 
-	// Parse the match at pos `pos` in the regex and use `mult` to turn that
-	// into ms, then add that value to the total parsed duration.
-	var overflowErr error
-	m := func(pos int, mult time.Duration) {
-		if matches[pos] == "" {
-			return
+	orig := s
+	var dur uint64
+	lastUnitPos := 0
+
+	for s != "" {
+		if !isdigit(s[0]) {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
 		}
-		n, _ := strconv.Atoi(matches[pos])
+		// Consume [0-9]*
+		i := 0
+		for ; i < len(s) && isdigit(s[i]); i++ {
+		}
+		v, err := strconv.ParseUint(s[:i], 10, 0)
+		if err != nil {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		s = s[i:]
 
+		// Consume unit.
+		for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+		}
+		if i == 0 {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+		}
+		if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		lastUnitPos = unit.pos
 		// Check if the provided duration overflows time.Duration (> ~ 290years).
-		if n > int((1<<63-1)/mult/time.Millisecond) {
-			overflowErr = errors.New("duration out of range")
+		if v > 1<<63/unit.mult {
+			return 0, errors.New("duration out of range")
 		}
-		d := time.Duration(n) * time.Millisecond
-		dur += d * mult
-
-		if dur < 0 {
-			overflowErr = errors.New("duration out of range")
+		dur += v * unit.mult
+		if dur > 1<<63-1 {
+			return 0, errors.New("duration out of range")
 		}
 	}
 
-	m(2, 1000*60*60*24*365) // y
-	m(4, 1000*60*60*24*7)   // w
-	m(6, 1000*60*60*24)     // d
-	m(8, 1000*60*60)        // h
-	m(10, 1000*60)          // m
-	m(12, 1000)             // s
-	m(14, 1)                // ms
+	return Duration(dur), nil
+}
 
-	return Duration(dur), overflowErr
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+	if s == "" || s[0] != '-' {
+		return ParseDuration(s)
+	}
+
+	d, err := ParseDuration(s[1:])
+
+	return -d, err
 }
 
 func (d Duration) String() string {
 	var (
-		ms = int64(time.Duration(d) / time.Millisecond)
-		r  = ""
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		r    = ""
+		sign = ""
 	)
+
 	if ms == 0 {
 		return "0s"
 	}
 
+	if ms < 0 {
+		sign, ms = "-", -ms
+	}
+
 	f := func(unit string, mult int64, exact bool) {
 		if exact && ms%mult != 0 {
 			return
@@ -263,7 +305,7 @@
 	f("s", 1000, false)
 	f("ms", 1, false)
 
-	return r
+	return sign + r
 }
 
 // MarshalJSON implements the json.Marshaler interface.
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1..a9995a3 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -16,104 +16,26 @@
 import (
 	"encoding/json"
 	"fmt"
-	"math"
 	"sort"
 	"strconv"
 	"strings"
 )
 
-var (
-	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
-	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
-	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
-	// of 0, which is possible to appear in a real SamplePair and thus not
-	// suitable to signal a non-existing SamplePair.
-	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which can appear in a real Sample and is thus not suitable for
+// signaling a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
 
-	// ZeroSample is the pseudo zero-value of Sample used to signal a
-	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
-	// and metric nil. Note that the natural zero value of Sample has a timestamp
-	// of 0, which is possible to appear in a real Sample and thus not suitable
-	// to signal a non-existing Sample.
-	ZeroSample = Sample{Timestamp: Earliest}
-)
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
-	return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
-	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return fmt.Errorf("sample value must be a quoted string")
-	}
-	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
-	if err != nil {
-		return err
-	}
-	*v = SampleValue(f)
-	return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
-	if v == o {
-		return true
-	}
-	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
-	return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
-	Timestamp Time
-	Value     SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
-	t, err := json.Marshal(s.Timestamp)
-	if err != nil {
-		return nil, err
-	}
-	v, err := json.Marshal(s.Value)
-	if err != nil {
-		return nil, err
-	}
-	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
-	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
-	return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
-	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
-	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. If Histogram is nil, the Value field
+// is used; otherwise, Value should be ignored.
 type Sample struct {
-	Metric    Metric      `json:"metric"`
-	Value     SampleValue `json:"value"`
-	Timestamp Time        `json:"timestamp"`
+	Metric    Metric           `json:"metric"`
+	Value     SampleValue      `json:"value"`
+	Timestamp Time             `json:"timestamp"`
+	Histogram *SampleHistogram `json:"histogram"`
 }
 
 // Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +51,19 @@
 	if !s.Timestamp.Equal(o.Timestamp) {
 		return false
 	}
-
+	if s.Histogram != nil {
+		return s.Histogram.Equal(o.Histogram)
+	}
 	return s.Value.Equal(o.Value)
 }
 
 func (s Sample) String() string {
+	if s.Histogram != nil {
+		return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		})
+	}
 	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
 		Timestamp: s.Timestamp,
 		Value:     s.Value,
@@ -142,6 +72,19 @@
 
 // MarshalJSON implements json.Marshaler.
 func (s Sample) MarshalJSON() ([]byte, error) {
+	if s.Histogram != nil {
+		v := struct {
+			Metric    Metric              `json:"metric"`
+			Histogram SampleHistogramPair `json:"histogram"`
+		}{
+			Metric: s.Metric,
+			Histogram: SampleHistogramPair{
+				Timestamp: s.Timestamp,
+				Histogram: s.Histogram,
+			},
+		}
+		return json.Marshal(&v)
+	}
 	v := struct {
 		Metric Metric     `json:"metric"`
 		Value  SamplePair `json:"value"`
@@ -152,21 +95,25 @@
 			Value:     s.Value,
 		},
 	}
-
 	return json.Marshal(&v)
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
 func (s *Sample) UnmarshalJSON(b []byte) error {
 	v := struct {
-		Metric Metric     `json:"metric"`
-		Value  SamplePair `json:"value"`
+		Metric    Metric              `json:"metric"`
+		Value     SamplePair          `json:"value"`
+		Histogram SampleHistogramPair `json:"histogram"`
 	}{
 		Metric: s.Metric,
 		Value: SamplePair{
 			Timestamp: s.Timestamp,
 			Value:     s.Value,
 		},
+		Histogram: SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		},
 	}
 
 	if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +121,13 @@
 	}
 
 	s.Metric = v.Metric
-	s.Timestamp = v.Value.Timestamp
-	s.Value = v.Value.Value
+	if v.Histogram.Histogram != nil {
+		s.Timestamp = v.Histogram.Timestamp
+		s.Histogram = v.Histogram.Histogram
+	} else {
+		s.Timestamp = v.Value.Timestamp
+		s.Value = v.Value.Value
+	}
 
 	return nil
 }
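
A hedged sketch of the histogram-aware `Sample` unmarshaling added in this hunk; the JSON layout mirrors the `SampleHistogramPair`/`HistogramBucket` types introduced later in this patch (value_histogram.go) and is inferred from that code, not from external documentation.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	raw := []byte(`{
		"metric": {"__name__": "rpc_duration_seconds"},
		"histogram": [1735689600, {
			"count": "5",
			"sum": "12.5",
			"buckets": [[3, "0.1", "0.5", "2"], [3, "0.5", "1", "3"]]
		}]
	}`)

	var s model.Sample
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	// The timestamp is taken from the histogram pair, and Value stays unset.
	fmt.Println(s.Timestamp, s.Histogram.Count, len(s.Histogram.Buckets)) // 1735689600 5 2
}
```
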
@@ -221,80 +173,77 @@
 
 // SampleStream is a stream of Values belonging to an attached COWMetric.
 type SampleStream struct {
-	Metric Metric       `json:"metric"`
-	Values []SamplePair `json:"values"`
+	Metric     Metric                `json:"metric"`
+	Values     []SamplePair          `json:"values"`
+	Histograms []SampleHistogramPair `json:"histograms"`
 }
 
 func (ss SampleStream) String() string {
-	vals := make([]string, len(ss.Values))
+	valuesLength := len(ss.Values)
+	vals := make([]string, valuesLength+len(ss.Histograms))
 	for i, v := range ss.Values {
 		vals[i] = v.String()
 	}
+	for i, v := range ss.Histograms {
+		vals[i+valuesLength] = v.String()
+	}
 	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
 }
 
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
-	Type() ValueType
-	String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+	switch {
+	case len(ss.Histograms) > 0 && len(ss.Values) > 0:
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Values     []SamplePair          `json:"values"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Values:     ss.Values,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	case len(ss.Histograms) > 0:
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	default:
+		v := struct {
+			Metric Metric       `json:"metric"`
+			Values []SamplePair `json:"values"`
+		}{
+			Metric: ss.Metric,
+			Values: ss.Values,
+		}
+		return json.Marshal(&v)
+	}
 }
 
-func (Matrix) Type() ValueType  { return ValMatrix }
-func (Vector) Type() ValueType  { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric     Metric                `json:"metric"`
+		Values     []SamplePair          `json:"values"`
+		Histograms []SampleHistogramPair `json:"histograms"`
+	}{
+		Metric:     ss.Metric,
+		Values:     ss.Values,
+		Histograms: ss.Histograms,
+	}
 
-type ValueType int
-
-const (
-	ValNone ValueType = iota
-	ValScalar
-	ValVector
-	ValMatrix
-	ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
-	return json.Marshal(et.String())
-}
-
-func (et *ValueType) UnmarshalJSON(b []byte) error {
-	var s string
-	if err := json.Unmarshal(b, &s); err != nil {
+	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
-	switch s {
-	case "<ValNone>":
-		*et = ValNone
-	case "scalar":
-		*et = ValScalar
-	case "vector":
-		*et = ValVector
-	case "matrix":
-		*et = ValMatrix
-	case "string":
-		*et = ValString
-	default:
-		return fmt.Errorf("unknown value type %q", s)
-	}
-	return nil
-}
 
-func (e ValueType) String() string {
-	switch e {
-	case ValNone:
-		return "<ValNone>"
-	case ValScalar:
-		return "scalar"
-	case ValVector:
-		return "vector"
-	case ValMatrix:
-		return "matrix"
-	case ValString:
-		return "string"
-	}
-	panic("ValueType.String: unhandled value type")
+	ss.Metric = v.Metric
+	ss.Values = v.Values
+	ss.Histograms = v.Histograms
+
+	return nil
 }
 
 // Scalar is a scalar value evaluated at the set timestamp.
@@ -310,7 +259,7 @@
 // MarshalJSON implements json.Marshaler.
 func (s Scalar) MarshalJSON() ([]byte, error) {
 	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
-	return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+	return json.Marshal([...]interface{}{s.Timestamp, v})
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
@@ -324,7 +273,7 @@
 
 	value, err := strconv.ParseFloat(f, 64)
 	if err != nil {
-		return fmt.Errorf("error parsing sample value: %s", err)
+		return fmt.Errorf("error parsing sample value: %w", err)
 	}
 	s.Value = SampleValue(value)
 	return nil
@@ -401,9 +350,9 @@
 func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
 func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
 
-func (mat Matrix) String() string {
-	matCp := make(Matrix, len(mat))
-	copy(matCp, mat)
+func (m Matrix) String() string {
+	matCp := make(Matrix, len(m))
+	copy(matCp, m)
 	sort.Sort(matCp)
 
 	strs := make([]string, len(matCp))
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 0000000..6bfc757
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which can appear in a real SamplePair and is thus not suitable for
+// signaling a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return errors.New("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
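
A minimal round-trip sketch for `SamplePair`, assuming the vendored `model` package; the expected output shown in the comments is indicative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var p model.SamplePair
	if err := json.Unmarshal([]byte(`[1735689600.123,"42.5"]`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // 42.5 @[1735689600.123]

	out, _ := json.Marshal(p)
	fmt.Println(string(out)) // [1735689600.123,"42.5"]
}
```
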
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 0000000..91ce5b7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,179 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return errors.New("float value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = FloatString(f)
+	return nil
+}
+
+type HistogramBucket struct {
+	Boundaries int32
+	Lower      FloatString
+	Upper      FloatString
+	Count      FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+	b, err := json.Marshal(s.Boundaries)
+	if err != nil {
+		return nil, err
+	}
+	l, err := json.Marshal(s.Lower)
+	if err != nil {
+		return nil, err
+	}
+	u, err := json.Marshal(s.Upper)
+	if err != nil {
+		return nil, err
+	}
+	c, err := json.Marshal(s.Count)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+	tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+	wantLen := len(tmp)
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	if gotLen := len(tmp); gotLen != wantLen {
+		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+	}
+	return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+	return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (s HistogramBucket) String() string {
+	var sb strings.Builder
+	lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3
+	upperInclusive := s.Boundaries == 0 || s.Boundaries == 3
+	if lowerInclusive {
+		sb.WriteRune('[')
+	} else {
+		sb.WriteRune('(')
+	}
+	fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper)
+	if upperInclusive {
+		sb.WriteRune(']')
+	} else {
+		sb.WriteRune(')')
+	}
+	fmt.Fprintf(&sb, ":%v", s.Count)
+	return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, bucket := range s {
+		if !bucket.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+type SampleHistogram struct {
+	Count   FloatString      `json:"count"`
+	Sum     FloatString      `json:"sum"`
+	Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+	return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+	return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+	Timestamp Time
+	// Histogram should never be nil; it is only stored as a pointer for efficiency.
+	Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+	if s.Histogram == nil {
+		return nil, errors.New("histogram is nil")
+	}
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Histogram)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+	tmp := []interface{}{&s.Timestamp, &s.Histogram}
+	wantLen := len(tmp)
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	if gotLen := len(tmp); gotLen != wantLen {
+		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+	}
+	if s.Histogram == nil {
+		return errors.New("histogram is null")
+	}
+	return nil
+}
+
+func (s SampleHistogramPair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+	return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
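
A short sketch of the bucket and histogram string formatting implemented above, assuming the vendored `model` package; the exact `%f`/`%v` renderings in the comments are approximate.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Boundaries == 3 means both bounds are inclusive.
	b := model.HistogramBucket{Boundaries: 3, Lower: 0.5, Upper: 1, Count: 3}
	fmt.Println(b.String()) // [0.5,1]:3

	h := model.SampleHistogram{
		Count:   5,
		Sum:     12.5,
		Buckets: model.HistogramBuckets{&b},
	}
	fmt.Println(h) // Count: 5.000000, Sum: 12.500000, Buckets: [[0.5,1]:3]
}
```
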
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 0000000..078910f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "<ValNone>":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (et ValueType) String() string {
+	switch et {
+	case ValNone:
+		return "<ValNone>"
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
index 25e3659..7cc33ae 100644
--- a/vendor/github.com/prometheus/procfs/.gitignore
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -1 +1,2 @@
-/fixtures/
+/testdata/fixtures/
+/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 0aa09ed..3c3bf91 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,4 +1,45 @@
----
+version: "2"
 linters:
   enable:
-  - golint
+    - forbidigo
+    - godot
+    - misspell
+    - revive
+    - testifylint
+  settings:
+    forbidigo:
+      forbid:
+        - pattern: ^fmt\.Print.*$
+          msg: Do not commit print statements.
+    godot:
+      exclude:
+        # Ignore "See: URL".
+        - 'See:'
+      capital: true
+    misspell:
+      locale: US
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/prometheus/procfs
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
index 9a1aff4..d325872 100644
--- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
+++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
@@ -1,3 +1,3 @@
-## Prometheus Community Code of Conduct
+# Prometheus Community Code of Conduct
 
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
index 943de76..853eb9d 100644
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -97,7 +97,7 @@
 reads in the same file.  Also, most of the files are relatively small (less than a few KBs), and system calls
 to the `stat` function will often return the wrong size.  Therefore, for most files it's recommended to read the 
 full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
 the file.
 
 Note that parsing the file's contents can still be performed one line at a time.  This is done by first reading 
@@ -113,7 +113,7 @@
 ```
 
 The `/sys` filesystem contains many very small files which contain only a single numeric or text value.  These files
-can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
 not bother to check the size of the file before reading.
 ```
     data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
index 56ba67d..e00f3b3 100644
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -1,2 +1,3 @@
 * Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
-* Paul Gier <pgier@redhat.com> @pgier
+* Paul Gier <paulgier@gmail.com> @pgier
+* Ben Kochie <superq@gmail.com> @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
index 616a0d2..7edfe4d 100644
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -14,16 +14,18 @@
 include Makefile.common
 
 %/.unpacked: %.ttar
-	@echo ">> extracting fixtures"
+	@echo ">> extracting fixtures $*"
 	./ttar -C $(dir $*) -x -f $*.ttar
 	touch $@
 
+fixtures: testdata/fixtures/.unpacked
+
 update_fixtures:
-	rm -vf fixtures/.unpacked
-	./ttar -c -f fixtures.ttar fixtures/
+	rm -vf testdata/fixtures/.unpacked
+	./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
 
 .PHONY: build
 build:
 
 .PHONY: test
-test: fixtures/.unpacked common-test
+test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 3ac29c6..0ed55c2 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -33,32 +33,9 @@
 GOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)
 
 GO_VERSION        ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
 
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
-	ifneq (,$(wildcard go.mod))
-		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
-		GO111MODULE := on
-
-		ifneq (,$(wildcard vendor))
-			# Always use the local vendor/ directory to satisfy the dependencies.
-			GOOPTS := $(GOOPTS) -mod=vendor
-		endif
-	endif
-else
-	ifneq (,$(wildcard go.mod))
-		ifneq (,$(wildcard vendor))
-$(warning This repository requires Go >= 1.11 because of Go modules)
-$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
-		endif
-	else
-		# This repository isn't using Go modules (yet).
-		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
-	endif
-endif
 PROMU        := $(FIRST_GOPATH)/bin/promu
 pkgs          = ./...
 
@@ -72,23 +49,32 @@
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
 	GOTEST_DIR := test-results
 	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif
 
-PROMU_VERSION ?= 0.7.0
+PROMU_VERSION ?= 0.17.0
 PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
+SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.18.0
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v2.0.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
-	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
-		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifneq (,$(SKIP_GOLANGCI_LINT))
+			GOLANGCI_LINT :=
+		else ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
 	endif
 endif
 
@@ -105,6 +91,8 @@
 PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
 TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
 
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
 ifeq ($(GOHOSTARCH),amd64)
         ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
                 # Only supported on amd64
@@ -118,7 +106,7 @@
 %: common-% ;
 
 .PHONY: common-all
-common-all: precheck style check_license lint unused build test
+common-all: precheck style check_license lint yamllint unused build test
 
 .PHONY: common-style
 common-style:
@@ -144,32 +132,25 @@
 .PHONY: common-deps
 common-deps:
 	@echo ">> getting dependencies"
-ifdef GO111MODULE
-	GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
-	$(GO) get $(GOOPTS) -t ./...
-endif
+	$(GO) mod download
 
 .PHONY: update-go-deps
 update-go-deps:
 	@echo ">> updating Go dependencies"
 	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-		$(GO) get $$m; \
+		$(GO) get -d $$m; \
 	done
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
+	$(GO) mod tidy
 
 .PHONY: common-test-short
 common-test-short: $(GOTEST_DIR)
 	@echo ">> running short tests"
-	GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
+	$(GOTEST) -short $(GOOPTS) $(pkgs)
 
 .PHONY: common-test
 common-test: $(GOTEST_DIR)
 	@echo ">> running all tests"
-	GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
 
 $(GOTEST_DIR):
 	@mkdir -p $@
@@ -177,25 +158,34 @@
 .PHONY: common-format
 common-format:
 	@echo ">> formatting code"
-	GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+	$(GO) fmt $(pkgs)
 
 .PHONY: common-vet
 common-vet:
 	@echo ">> vetting code"
-	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+	$(GO) vet $(GOOPTS) $(pkgs)
 
 .PHONY: common-lint
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
 	@echo ">> running golangci-lint"
-ifdef GO111MODULE
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
-	GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
-	GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
-	$(GOLANGCI_LINT) run $(pkgs)
+	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif
+
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint fix"
+	$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-yamllint
+common-yamllint:
+	@echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell command -v yamllint 2> /dev/null))
+	@echo "yamllint not installed so skipping"
+else
+	yamllint .
 endif
 
 # For backward-compatibility.
@@ -203,38 +193,29 @@
 common-staticcheck: lint
 
 .PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
-	@echo ">> running check for unused packages"
-	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
 	@echo ">> running check for unused/missing packages in go.mod"
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+	$(GO) mod tidy
 	@git diff --exit-code -- go.sum go.mod
-else
-	@echo ">> running check for unused packages in vendor/"
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-	@git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
 
 .PHONY: common-build
 common-build: promu
 	@echo ">> building binaries"
-	GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
 
 .PHONY: common-tarball
 common-tarball: promu
 	@echo ">> building release tarball"
 	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
 
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:
-	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
 		-f $(DOCKERFILE_PATH) \
 		--build-arg ARCH="$*" \
 		--build-arg OS="linux" \
@@ -243,19 +224,19 @@
 .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
 common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
 $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
-	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
 .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
 common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
 $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
 
 .PHONY: common-docker-manifest
 common-docker-manifest:
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 .PHONY: promu
 promu: $(PROMU)
@@ -280,12 +261,6 @@
 		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 endif
 
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
-	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
 .PHONY: precheck
 precheck::
 
@@ -300,3 +275,9 @@
 		exit 1; \
 	fi
 endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 55d1e32..0718239 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -6,8 +6,8 @@
 *WARNING*: This package is a work in progress. Its API may still break in
 backwards-incompatible ways without warnings. Use it at your own risk.
 
-[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
-[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
+[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
 
 ## Usage
@@ -47,15 +47,15 @@
 The procfs library includes a set of test fixtures which include many example files from
 the `/proc` and `/sys` filesystems.  These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
 which is extracted automatically during testing.  To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
 
 ```bash
-rm -rf fixtures
+rm -rf testdata/fixtures
 make test
 ```
 
-Next, make the required changes to the extracted files in the `fixtures` directory.  When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory.  When
 the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
 based on the updated `fixtures` directory.  And finally, verify the changes using
-`git diff fixtures.ttar`.
+`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
index 67741f0..fed02d8 100644
--- a/vendor/github.com/prometheus/procfs/SECURITY.md
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -3,4 +3,4 @@
 The Prometheus security policy, including how to report vulnerabilities, can be
 found here:
 
-https://prometheus.io/docs/operating/security/
+<https://prometheus.io/docs/operating/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 4e47e61..2e53344 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -15,11 +15,28 @@
 
 import (
 	"fmt"
-	"io/ioutil"
 	"net"
+	"os"
+	"strconv"
 	"strings"
 )
 
+// Learned from include/uapi/linux/if_arp.h.
+const (
+	// Completed entry (ha valid).
+	ATFComplete = 0x02
+	// Permanent entry.
+	ATFPermanent = 0x04
+	// Publish entry.
+	ATFPublish = 0x08
+	// Has requested trailers.
+	ATFUseTrailers = 0x10
+	// Obsoleted: Want to use a netmask (only for proxy entries).
+	ATFNetmask = 0x20
+	// Don't answer this address.
+	ATFDontPublish = 0x40
+)
+
 // ARPEntry contains a single row of the columnar data represented in
 // /proc/net/arp.
 type ARPEntry struct {
@@ -29,14 +46,16 @@
 	HWAddr net.HardwareAddr
 	// Name of the device
 	Device string
+	// Flags
+	Flags byte
 }
 
 // GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
 // and then return a slice of ARPEntry's.
 func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+	data, err := os.ReadFile(fs.proc.Path("net/arp"))
 	if err != nil {
-		return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
+		return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
 	}
 
 	return parseARPEntries(data)
@@ -59,11 +78,11 @@
 		} else if width == expectedDataWidth {
 			entry, err := parseARPEntry(columns)
 			if err != nil {
-				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
+				return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
 			}
 			entries = append(entries, entry)
 		} else {
-			return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
+			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
 		}
 
 	}
@@ -72,14 +91,26 @@
 }
 
 func parseARPEntry(columns []string) (ARPEntry, error) {
+	entry := ARPEntry{Device: columns[5]}
 	ip := net.ParseIP(columns[0])
-	mac := net.HardwareAddr(columns[3])
+	entry.IPAddr = ip
 
-	entry := ARPEntry{
-		IPAddr: ip,
-		HWAddr: mac,
-		Device: columns[5],
+	if mac, err := net.ParseMAC(columns[3]); err == nil {
+		entry.HWAddr = mac
+	} else {
+		return ARPEntry{}, err
+	}
+
+	if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+		entry.Flags = byte(flags)
+	} else {
+		return ARPEntry{}, err
 	}
 
 	return entry, nil
 }
+
+// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+	return entry.Flags&ATFComplete != 0
+}
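
The arp.go changes above add flag parsing (the new ATF* constants and the Flags field) plus the IsComplete helper. Below is a minimal, illustrative sketch of how a consumer of this vendored package could use the new API; GatherARPEntries and IsComplete come from the hunk above, while procfs.NewFS and the "/proc" mount path are assumptions based on the package's usual entry point, and the program itself is not part of this change.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS points the library at a procfs mount; "/proc" is the usual location.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}

	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatalf("could not read ARP table: %s", err)
	}

	for _, e := range entries {
		// IsComplete checks the ATFComplete bit parsed from the flags column.
		fmt.Printf("%s %s on %s (complete: %t)\n", e.IPAddr, e.HWAddr, e.Device, e.IsComplete())
	}
}
```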
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index f5b7939..8380750 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -55,18 +55,18 @@
 		parts := strings.Fields(line)
 
 		if len(parts) < 4 {
-			return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+			return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
 		}
 
-		node := strings.TrimRight(parts[1], ",")
-		zone := strings.TrimRight(parts[3], ",")
+		node := strings.TrimSuffix(parts[1], ",")
+		zone := strings.TrimSuffix(parts[3], ",")
 		arraySize := len(parts[4:])
 
 		if bucketCount == -1 {
 			bucketCount = arraySize
 		} else {
 			if bucketCount != arraySize {
-				return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+				return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
 			}
 		}
 
@@ -74,7 +74,7 @@
 		for i := 0; i < arraySize; i++ {
 			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
 			if err != nil {
-				return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
+				return nil, fmt.Errorf("%w: Invalid value in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
 			}
 		}
 
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
new file mode 100644
index 0000000..bf4f3b4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cmdline.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CmdLine returns the command line of the kernel.
+func (fs FS) CmdLine() ([]string, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+
+	return strings.Fields(string(data)), nil
+}
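
The new cmdline.go exposes the kernel command line through FS.CmdLine. A short illustrative snippet (not part of this change) showing the expected call pattern; it assumes the package's NewFS constructor and the default "/proc" mount.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}

	// CmdLine reads /proc/cmdline and returns its whitespace-separated fields.
	args, err := fs.CmdLine()
	if err != nil {
		log.Fatalf("could not read kernel command line: %s", err)
	}
	fmt.Println(strings.Join(args, " "))
}
```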
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index 5623b24..f0950bb 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux
 // +build linux
 
 package procfs
@@ -27,7 +28,7 @@
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
 type CPUInfo struct {
 	Processor       uint
 	VendorID        string
@@ -78,7 +79,7 @@
 	// find the first "processor" line
 	firstLine := firstNonEmptyLine(scanner)
 	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+		return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
 	}
 	field := strings.SplitN(firstLine, ": ", 2)
 	v, err := strconv.ParseUint(field[1], 0, 32)
@@ -191,9 +192,10 @@
 	scanner := bufio.NewScanner(bytes.NewReader(info))
 
 	firstLine := firstNonEmptyLine(scanner)
-	match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
+	match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
 	if !match || !strings.Contains(firstLine, ":") {
-		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+		return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+
 	}
 	field := strings.SplitN(firstLine, ": ", 2)
 	cpuinfo := []CPUInfo{}
@@ -257,7 +259,7 @@
 
 	firstLine := firstNonEmptyLine(scanner)
 	if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
-		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+		return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
 	}
 	field := strings.SplitN(firstLine, ": ", 2)
 	cpuinfo := []CPUInfo{}
@@ -282,7 +284,7 @@
 		if strings.HasPrefix(line, "processor") {
 			match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
 			if len(match) < 2 {
-				return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+				return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
 			}
 			cpu := commonCPUInfo
 			v, err := strconv.ParseUint(match[1], 0, 32)
@@ -342,7 +344,7 @@
 	// find the first "processor" line
 	firstLine := firstNonEmptyLine(scanner)
 	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
-		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
 	}
 	field := strings.SplitN(firstLine, ": ", 2)
 	cpuinfo := []CPUInfo{}
@@ -379,12 +381,48 @@
 	return cpuinfo, nil
 }
 
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "CPU Family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "Model Name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
 func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
 	scanner := bufio.NewScanner(bytes.NewReader(info))
 
 	firstLine := firstNonEmptyLine(scanner)
 	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
 	}
 	field := strings.SplitN(firstLine, ": ", 2)
 	v, err := strconv.ParseUint(field[1], 0, 32)
@@ -429,7 +467,7 @@
 
 	firstLine := firstNonEmptyLine(scanner)
 	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
 	}
 	field := strings.SplitN(firstLine, ": ", 2)
 	v, err := strconv.ParseUint(field[1], 0, 32)
@@ -469,7 +507,7 @@
 }
 
 // firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line
+// and returns the contents of that line.
 func firstNonEmptyLine(scanner *bufio.Scanner) string {
 	for scanner.Scan() {
 		line := scanner.Text()
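
The cpuinfo.go changes switch the parsers to the wrapped ErrFileParse errors and add a loong64 parser that fills Processor, VendorID (taken from the "system type" line), CPUFamily, and ModelName. For reference, here is a hedged sketch of reading those fields through the package's FS.CPUInfo accessor, which in this package resolves to the per-architecture parseCPUInfo implementation; the surrounding program and the "/proc" mount are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}

	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatalf("could not read /proc/cpuinfo: %s", err)
	}

	for _, cpu := range cpus {
		// On loong64, VendorID carries the "system type" value set by the new parser.
		fmt.Printf("cpu%d: %s %s (family %q)\n", cpu.Processor, cpu.VendorID, cpu.ModelName, cpu.CPUFamily)
	}
}
```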
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
index 44b590e..64cfd53 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (arm || arm64)
 // +build linux
 // +build arm arm64
 
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 0000000..d88442f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
index 91e2725..c11207f 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (mips || mipsle || mips64 || mips64le)
 // +build linux
 // +build mips mipsle mips64 mips64le
 
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index 95b5b4e..a6b2b31 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux
-// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
 
 package procfs
 
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
index 6068bd5..003bc2a 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (ppc64 || ppc64le)
 // +build linux
 // +build ppc64 ppc64le
 
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
index e83c2e2..1c9b731 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (riscv || riscv64)
 // +build linux
 // +build riscv riscv64
 
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
index 26814ee..fa3686b 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux
 // +build linux
 
 package procfs
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
index d5bedf9..a0ef555 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (386 || amd64)
 // +build linux
 // +build 386 amd64
 
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
index 5048ad1..5f2a37a 100644
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -55,12 +55,13 @@
 	path := fs.proc.Path("crypto")
 	b, err := util.ReadFileNoStat(path)
 	if err != nil {
-		return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
+		return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
+
 	}
 
 	crypto, err := parseCrypto(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
+		return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
 	}
 
 	return crypto, nil
@@ -83,7 +84,7 @@
 
 		kv := strings.Split(text, ":")
 		if len(kv) != 2 {
-			return nil, fmt.Errorf("malformed crypto line: %q", text)
+			return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
 		}
 
 		k := strings.TrimSpace(kv[0])
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
index e2acd6d..f9d961e 100644
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
 //
 // Example:
 //
-//    package main
+//	package main
 //
-//    import (
-//    	"fmt"
-//    	"log"
+//	import (
+//		"fmt"
+//		"log"
 //
-//    	"github.com/prometheus/procfs"
-//    )
+//		"github.com/prometheus/procfs"
+//	)
 //
-//    func main() {
-//    	p, err := procfs.Self()
-//    	if err != nil {
-//    		log.Fatalf("could not get process: %s", err)
-//    	}
+//	func main() {
+//		p, err := procfs.Self()
+//		if err != nil {
+//			log.Fatalf("could not get process: %s", err)
+//		}
 //
-//    	stat, err := p.NewStat()
-//    	if err != nil {
-//    		log.Fatalf("could not get process stat: %s", err)
-//    	}
+//		stat, err := p.Stat()
+//		if err != nil {
+//			log.Fatalf("could not get process stat: %s", err)
+//		}
 //
-//    	fmt.Printf("command:  %s\n", stat.Comm)
-//    	fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-//    	fmt.Printf("vsize:    %dB\n", stat.VirtualMemory())
-//    	fmt.Printf("rss:      %dB\n", stat.ResidentMemory())
-//    }
-//
+//		fmt.Printf("command:  %s\n", stat.Comm)
+//		fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+//		fmt.Printf("vsize:    %dB\n", stat.VirtualMemory())
+//		fmt.Printf("rss:      %dB\n", stat.ResidentMemory())
+//	}
 package procfs
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
deleted file mode 100644
index 1e76173..0000000
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ /dev/null
@@ -1,6553 +0,0 @@
-# Archive created by ttar -c -f fixtures.ttar fixtures/
-Directory: fixtures
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cmdline
-Lines: 1
-vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/comm
-Lines: 1
-vim
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cwd
-SymlinkTo: /usr/bin
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/environ
-Lines: 1
-PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/exe
-SymlinkTo: /usr/bin/vim
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/10
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fdinfo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/0
-Lines: 6
-pos:	0
-flags:	02004000
-mnt_id:	13
-inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000
-inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a
-inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/1
-Lines: 4
-pos:	0
-flags:	02004002
-mnt_id:	13
-eventfd-count:                0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/10
-Lines: 3
-pos:	0
-flags:	02004002
-mnt_id:	9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/2
-Lines: 3
-pos:	0
-flags:	02004002
-mnt_id:	9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/3
-Lines: 3
-pos:	0
-flags:	02004002
-mnt_id:	9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/io
-Lines: 7
-rchar: 750339
-wchar: 818609
-syscr: 7405
-syscw: 5245
-read_bytes: 1024
-write_bytes: 2048
-cancelled_write_bytes: -1024
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/limits
-Lines: 17
-Limit                     Soft Limit           Hard Limit           Units
-Max cpu time              unlimited            unlimited            seconds
-Max file size             unlimited            unlimited            bytes
-Max data size             unlimited            unlimited            bytes
-Max stack size            8388608              unlimited            bytes
-Max core file size        0                    unlimited            bytes
-Max resident set          unlimited            unlimited            bytes
-Max processes             62898                62898                processes
-Max open files            2048                 4096                 files
-Max locked memory         18446744073708503040 18446744073708503040 bytes
-Max address space         8589934592           unlimited            bytes
-Max file locks            unlimited            unlimited            locks
-Max pending signals       62898                62898                signals
-Max msgqueue size         819200               819200               bytes
-Max nice priority         0                    0
-Max realtime priority     0                    0
-Max realtime timeout      unlimited            unlimited            us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/mountstats
-Lines: 20
-device rootfs mounted on / with fstype rootfs
-device sysfs mounted on /sys with fstype sysfs
-device proc mounted on /proc with fstype proc
-device /dev/sda1 mounted on / with fstype ext4
-device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
-	opts:	rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none
-	age:	13968
-	caps:	caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
-	nfsv4:	bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
-	sec:	flavor=1,pseudoflavor=1
-	events:	52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
-	bytes:	1207640230 0 0 0 1210214218 0 295483 0
-	RPC iostats version: 1.0  p/v: 100003/4 (nfs)
-	xprt:	tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
-	per-op statistics
-	        NULL: 0 0 0 0 0 0 0 0
-	        READ: 1298 1298 0 207680 1210292152 6 79386 79407
-	       WRITE: 0 0 0 0 0 0 0 0
-	      ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717
-
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/net/dev
-Lines: 4
-Inter-|   Receive                                                |  Transmit
- face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
-    lo:       0       0    0    0    0     0          0         0        0       0    0    0    0     0       0          0
-  eth0:     438       5    0    0    0     0          0         0      648       8    0    0    0     0       0          0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/ns
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/mnt
-SymlinkTo: mnt:[4026531840]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/net
-SymlinkTo: net:[4026531993]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/root
-SymlinkTo: /
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/schedstat
-Lines: 1
-411605849 93680043 79
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps
-Lines: 252
-00400000-00cb1000 r-xp 00000000 fd:01 952273                             /bin/alertmanager
-Size:               8900 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                2952 kB
-Pss:                2952 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:      2952 kB
-Private_Dirty:         0 kB
-Referenced:         2864 kB
-Anonymous:             0 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd ex mr mw me dw sd 
-00cb1000-016b0000 r--p 008b1000 fd:01 952273                             /bin/alertmanager
-Size:              10236 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                6152 kB
-Pss:                6152 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:      6152 kB
-Private_Dirty:         0 kB
-Referenced:         5308 kB
-Anonymous:             0 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd mr mw me dw sd 
-016b0000-0171a000 rw-p 012b0000 fd:01 952273                             /bin/alertmanager
-Size:                424 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                 176 kB
-Pss:                 176 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:        84 kB
-Private_Dirty:        92 kB
-Referenced:          176 kB
-Anonymous:            92 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                 12 kB
-SwapPss:              12 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me dw ac sd 
-0171a000-0173f000 rw-p 00000000 00:00 0 
-Size:                148 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                  76 kB
-Pss:                  76 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:         0 kB
-Private_Dirty:        76 kB
-Referenced:           76 kB
-Anonymous:            76 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me ac sd 
-c000000000-c000400000 rw-p 00000000 00:00 0 
-Size:               4096 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                2564 kB
-Pss:                2564 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:        20 kB
-Private_Dirty:      2544 kB
-Referenced:         2544 kB
-Anonymous:          2564 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:               1100 kB
-SwapPss:            1100 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me ac sd 
-c000400000-c001600000 rw-p 00000000 00:00 0 
-Size:              18432 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:               16024 kB
-Pss:               16024 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:      5864 kB
-Private_Dirty:     10160 kB
-Referenced:        11944 kB
-Anonymous:         16024 kB
-LazyFree:           5848 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                440 kB
-SwapPss:             440 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me ac sd nh 
-c001600000-c004000000 rw-p 00000000 00:00 0 
-Size:              43008 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                   0 kB
-Pss:                   0 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:         0 kB
-Private_Dirty:         0 kB
-Referenced:            0 kB
-Anonymous:             0 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me ac sd 
-7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 
-Size:              38596 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                1992 kB
-Pss:                1992 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:       476 kB
-Private_Dirty:      1516 kB
-Referenced:         1828 kB
-Anonymous:          1992 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                384 kB
-SwapPss:             384 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me ac sd 
-7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0                          [stack]
-Size:                132 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                   8 kB
-Pss:                   8 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:         0 kB
-Private_Dirty:         8 kB
-Referenced:            8 kB
-Anonymous:             8 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  4 kB
-SwapPss:               4 kB
-Locked:                0 kB
-VmFlags: rd wr mr mw me gd ac 
-7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0                          [vvar]
-Size:                 12 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                   0 kB
-Pss:                   0 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:         0 kB
-Private_Dirty:         0 kB
-Referenced:            0 kB
-Anonymous:             0 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd mr pf io de dd sd 
-7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0                          [vdso]
-Size:                  8 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                   4 kB
-Pss:                   0 kB
-Shared_Clean:          4 kB
-Shared_Dirty:          0 kB
-Private_Clean:         0 kB
-Private_Dirty:         0 kB
-Referenced:            4 kB
-Anonymous:             0 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd ex mr mw me de sd 
-ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]
-Size:                  4 kB
-KernelPageSize:        4 kB
-MMUPageSize:           4 kB
-Rss:                   0 kB
-Pss:                   0 kB
-Shared_Clean:          0 kB
-Shared_Dirty:          0 kB
-Private_Clean:         0 kB
-Private_Dirty:         0 kB
-Referenced:            0 kB
-Anonymous:             0 kB
-LazyFree:              0 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:                  0 kB
-SwapPss:               0 kB
-Locked:                0 kB
-VmFlags: rd ex 
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps_rollup
-Lines: 17
-00400000-ffffffffff601000 ---p 00000000 00:00 0                          [rollup]
-Rss:               29948 kB
-Pss:               29944 kB
-Shared_Clean:          4 kB
-Shared_Dirty:          0 kB
-Private_Clean:     15548 kB
-Private_Dirty:     14396 kB
-Referenced:        24752 kB
-Anonymous:         20756 kB
-LazyFree:           5848 kB
-AnonHugePages:         0 kB
-ShmemPmdMapped:        0 kB
-Shared_Hugetlb:        0 kB
-Private_Hugetlb:       0 kB
-Swap:               1940 kB
-SwapPss:            1940 kB
-Locked:                0 kB
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/stat
-Lines: 1
-26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/status
-Lines: 53
-
-Name:	prometheus
-Umask:	0022
-State:	S (sleeping)
-Tgid:	26231
-Ngid:	0
-Pid:	26231
-PPid:	1
-TracerPid:	0
-Uid:	1000	1000	1000	0
-Gid:	1001	1001	1001	0
-FDSize:	128
-Groups:
-NStgid:	1
-NSpid:	1
-NSpgid:	1
-NSsid:	1
-VmPeak:	   58472 kB
-VmSize:	   58440 kB
-VmLck:	       0 kB
-VmPin:	       0 kB
-VmHWM:	    8028 kB
-VmRSS:	    6716 kB
-RssAnon:	    2092 kB
-RssFile:	    4624 kB
-RssShmem:	       0 kB
-VmData:	    2580 kB
-VmStk:	     136 kB
-VmExe:	     948 kB
-VmLib:	    6816 kB
-VmPTE:	     128 kB
-VmPMD:	      12 kB
-VmSwap:	     660 kB
-HugetlbPages:	       0 kB
-Threads:	1
-SigQ:	8/63965
-SigPnd:	0000000000000000
-ShdPnd:	0000000000000000
-SigBlk:	7be3c0fe28014a03
-SigIgn:	0000000000001000
-SigCgt:	00000001800004ec
-CapInh:	0000000000000000
-CapPrm:	0000003fffffffff
-CapEff:	0000003fffffffff
-CapBnd:	0000003fffffffff
-CapAmb:	0000000000000000
-Seccomp:	0
-Cpus_allowed:	ff
-Cpus_allowed_list:	0-7
-Mems_allowed:	00000000,00000001
-Mems_allowed_list:	0
-voluntary_ctxt_switches:	4742839
-nonvoluntary_ctxt_switches:	1727500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/wchan
-Lines: 1
-poll_schedule_timeoutEOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cmdline
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/comm
-Lines: 1
-ata_sff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cwd
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/4
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/limits
-Lines: 17
-Limit                     Soft Limit           Hard Limit           Units
-Max cpu time              unlimited            unlimited            seconds
-Max file size             unlimited            unlimited            bytes
-Max data size             unlimited            unlimited            bytes
-Max stack size            8388608              unlimited            bytes
-Max core file size        0                    unlimited            bytes
-Max resident set          unlimited            unlimited            bytes
-Max processes             29436                29436                processes
-Max open files            1024                 4096                 files
-Max locked memory         65536                65536                bytes
-Max address space         unlimited            unlimited            bytes
-Max file locks            unlimited            unlimited            locks
-Max pending signals       29436                29436                signals
-Max msgqueue size         819200               819200               bytes
-Max nice priority         0                    0
-Max realtime priority     0                    0
-Max realtime timeout      unlimited            unlimited            us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/maps
-Lines: 9
-55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994                   /bin/cat
-55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994                   /bin/cat
-55680bed6000-55680bef7000 rw-p 00000000 00:00 0                          [heap]
-7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624                   /usr/lib/locale/locale-archive
-7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062                   /lib/x86_64-linux-gnu/libc-2.29.so
-7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0                          [stack]
-7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0                          [vvar]
-7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0                          [vdso]
-ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0                  [vsyscall]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/root
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/stat
-Lines: 1
-33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/wchan
-Lines: 1
-0EOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26233
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/cmdline
-Lines: 1
-com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/schedstat
-Lines: 8
- ____________________________________
-< this is a malformed schedstat file >
- ------------------------------------
-        \   ^__^
-         \  (oo)\_______
-            (__)\       )\/\
-                ||----w |
-                ||     ||
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26234
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26234/maps
-Lines: 4
-08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh
-08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh
-0808c000-08146000 rwxp 00000000 00:00 0
-40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/584
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/584/stat
-Lines: 2
-1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
-#!/bin/cat /proc/self/stat
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/buddyinfo
-Lines: 3
-Node 0, zone      DMA      1      0      1      0      2      1      1      0      1      1      3
-Node 0, zone    DMA32    759    572    791    475    194     45     12      0      0      0      0
-Node 0, zone   Normal   4381   1093    185   1530    567    102      4      0      0      0      0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/cpuinfo
-Lines: 216
-processor	: 0
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 799.998
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 0
-cpu cores	: 4
-apicid		: 0
-initial apicid	: 0
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 1
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 800.037
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 1
-cpu cores	: 4
-apicid		: 2
-initial apicid	: 2
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 2
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 800.010
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 2
-cpu cores	: 4
-apicid		: 4
-initial apicid	: 4
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 3
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 800.028
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 3
-cpu cores	: 4
-apicid		: 6
-initial apicid	: 6
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 4
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 799.989
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 0
-cpu cores	: 4
-apicid		: 1
-initial apicid	: 1
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 5
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 800.083
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 1
-cpu cores	: 4
-apicid		: 3
-initial apicid	: 3
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 6
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 800.017
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 2
-cpu cores	: 4
-apicid		: 5
-initial apicid	: 5
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-processor	: 7
-vendor_id	: GenuineIntel
-cpu family	: 6
-model		: 142
-model name	: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping	: 10
-microcode	: 0xb4
-cpu MHz		: 800.030
-cache size	: 8192 KB
-physical id	: 0
-siblings	: 8
-core id		: 3
-cpu cores	: 4
-apicid		: 7
-initial apicid	: 7
-fpu		: yes
-fpu_exception	: yes
-cpuid level	: 22
-wp		: yes
-flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs		: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips	: 4224.00
-clflush size	: 64
-cache_alignment	: 64
-address sizes	: 39 bits physical, 48 bits virtual
-power management:
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/crypto
-Lines: 972
-name         : ccm(aes)
-driver       : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
-module       : ccm
-priority     : 300
-refcnt       : 4
-selftest     : passed
-internal     : no
-type         : aead
-async        : no
-blocksize    : 1
-ivsize       : 16
-maxauthsize  : 16
-geniv        : <none>
-
-name         : cbcmac(aes)
-driver       : cbcmac(aes-aesni)
-module       : ccm
-priority     : 300
-refcnt       : 7
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 16
-
-name         : ecdh
-driver       : ecdh-generic
-module       : ecdh_generic
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : kpp
-async        : yes
-
-name         : ecb(arc4)
-driver       : ecb(arc4)-generic
-module       : arc4
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : no
-blocksize    : 1
-min keysize  : 1
-max keysize  : 256
-ivsize       : 0
-chunksize    : 1
-walksize     : 1
-
-name         : arc4
-driver       : arc4-generic
-module       : arc4
-priority     : 0
-refcnt       : 3
-selftest     : passed
-internal     : no
-type         : cipher
-blocksize    : 1
-min keysize  : 1
-max keysize  : 256
-
-name         : crct10dif
-driver       : crct10dif-pclmul
-module       : crct10dif_pclmul
-priority     : 200
-refcnt       : 2
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 2
-
-name         : crc32
-driver       : crc32-pclmul
-module       : crc32_pclmul
-priority     : 200
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 4
-
-name         : __ghash
-driver       : cryptd(__ghash-pclmulqdqni)
-module       : kernel
-priority     : 50
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : ahash
-async        : yes
-blocksize    : 16
-digestsize   : 16
-
-name         : ghash
-driver       : ghash-clmulni
-module       : ghash_clmulni_intel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : ahash
-async        : yes
-blocksize    : 16
-digestsize   : 16
-
-name         : __ghash
-driver       : __ghash-pclmulqdqni
-module       : ghash_clmulni_intel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : shash
-blocksize    : 16
-digestsize   : 16
-
-name         : crc32c
-driver       : crc32c-intel
-module       : crc32c_intel
-priority     : 200
-refcnt       : 5
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 4
-
-name         : cbc(aes)
-driver       : cbc(aes-aesni)
-module       : kernel
-priority     : 300
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : no
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : ctr(aes)
-driver       : ctr(aes-aesni)
-module       : kernel
-priority     : 300
-refcnt       : 5
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : no
-blocksize    : 1
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : pkcs1pad(rsa,sha256)
-driver       : pkcs1pad(rsa-generic,sha256)
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : akcipher
-
-name         : __xts(aes)
-driver       : cryptd(__xts-aes-aesni)
-module       : kernel
-priority     : 451
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : yes
-blocksize    : 16
-min keysize  : 32
-max keysize  : 64
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : xts(aes)
-driver       : xts-aes-aesni
-module       : kernel
-priority     : 401
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : yes
-blocksize    : 16
-min keysize  : 32
-max keysize  : 64
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : __ctr(aes)
-driver       : cryptd(__ctr-aes-aesni)
-module       : kernel
-priority     : 450
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : yes
-blocksize    : 1
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : ctr(aes)
-driver       : ctr-aes-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : yes
-blocksize    : 1
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : __cbc(aes)
-driver       : cryptd(__cbc-aes-aesni)
-module       : kernel
-priority     : 450
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : yes
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : cbc(aes)
-driver       : cbc-aes-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : yes
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : __ecb(aes)
-driver       : cryptd(__ecb-aes-aesni)
-module       : kernel
-priority     : 450
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : yes
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 0
-chunksize    : 16
-walksize     : 16
-
-name         : ecb(aes)
-driver       : ecb-aes-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : yes
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 0
-chunksize    : 16
-walksize     : 16
-
-name         : __generic-gcm-aes-aesni
-driver       : cryptd(__driver-generic-gcm-aes-aesni)
-module       : kernel
-priority     : 50
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : aead
-async        : yes
-blocksize    : 1
-ivsize       : 12
-maxauthsize  : 16
-geniv        : <none>
-
-name         : gcm(aes)
-driver       : generic-gcm-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : aead
-async        : yes
-blocksize    : 1
-ivsize       : 12
-maxauthsize  : 16
-geniv        : <none>
-
-name         : __generic-gcm-aes-aesni
-driver       : __driver-generic-gcm-aes-aesni
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : aead
-async        : no
-blocksize    : 1
-ivsize       : 12
-maxauthsize  : 16
-geniv        : <none>
-
-name         : __gcm-aes-aesni
-driver       : cryptd(__driver-gcm-aes-aesni)
-module       : kernel
-priority     : 50
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : aead
-async        : yes
-blocksize    : 1
-ivsize       : 8
-maxauthsize  : 16
-geniv        : <none>
-
-name         : rfc4106(gcm(aes))
-driver       : rfc4106-gcm-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : aead
-async        : yes
-blocksize    : 1
-ivsize       : 8
-maxauthsize  : 16
-geniv        : <none>
-
-name         : __gcm-aes-aesni
-driver       : __driver-gcm-aes-aesni
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : aead
-async        : no
-blocksize    : 1
-ivsize       : 8
-maxauthsize  : 16
-geniv        : <none>
-
-name         : __xts(aes)
-driver       : __xts-aes-aesni
-module       : kernel
-priority     : 401
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : no
-blocksize    : 16
-min keysize  : 32
-max keysize  : 64
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : __ctr(aes)
-driver       : __ctr-aes-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : no
-blocksize    : 1
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : __cbc(aes)
-driver       : __cbc-aes-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : no
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 16
-chunksize    : 16
-walksize     : 16
-
-name         : __ecb(aes)
-driver       : __ecb-aes-aesni
-module       : kernel
-priority     : 400
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : skcipher
-async        : no
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-ivsize       : 0
-chunksize    : 16
-walksize     : 16
-
-name         : __aes
-driver       : __aes-aesni
-module       : kernel
-priority     : 300
-refcnt       : 1
-selftest     : passed
-internal     : yes
-type         : cipher
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-
-name         : aes
-driver       : aes-aesni
-module       : kernel
-priority     : 300
-refcnt       : 8
-selftest     : passed
-internal     : no
-type         : cipher
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-
-name         : hmac(sha1)
-driver       : hmac(sha1-generic)
-module       : kernel
-priority     : 100
-refcnt       : 9
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 64
-digestsize   : 20
-
-name         : ghash
-driver       : ghash-generic
-module       : kernel
-priority     : 100
-refcnt       : 3
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 16
-digestsize   : 16
-
-name         : jitterentropy_rng
-driver       : jitterentropy_rng
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_hmac_sha256
-module       : kernel
-priority     : 221
-refcnt       : 2
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_hmac_sha512
-module       : kernel
-priority     : 220
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_hmac_sha384
-module       : kernel
-priority     : 219
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_hmac_sha1
-module       : kernel
-priority     : 218
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_sha256
-module       : kernel
-priority     : 217
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_sha512
-module       : kernel
-priority     : 216
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_sha384
-module       : kernel
-priority     : 215
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_sha1
-module       : kernel
-priority     : 214
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_ctr_aes256
-module       : kernel
-priority     : 213
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_ctr_aes192
-module       : kernel
-priority     : 212
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_nopr_ctr_aes128
-module       : kernel
-priority     : 211
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : hmac(sha256)
-driver       : hmac(sha256-generic)
-module       : kernel
-priority     : 100
-refcnt       : 10
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 64
-digestsize   : 32
-
-name         : stdrng
-driver       : drbg_pr_hmac_sha256
-module       : kernel
-priority     : 210
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_hmac_sha512
-module       : kernel
-priority     : 209
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_hmac_sha384
-module       : kernel
-priority     : 208
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_hmac_sha1
-module       : kernel
-priority     : 207
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_sha256
-module       : kernel
-priority     : 206
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_sha512
-module       : kernel
-priority     : 205
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_sha384
-module       : kernel
-priority     : 204
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_sha1
-module       : kernel
-priority     : 203
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_ctr_aes256
-module       : kernel
-priority     : 202
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_ctr_aes192
-module       : kernel
-priority     : 201
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : stdrng
-driver       : drbg_pr_ctr_aes128
-module       : kernel
-priority     : 200
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : rng
-seedsize     : 0
-
-name         : 842
-driver       : 842-scomp
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : scomp
-
-name         : 842
-driver       : 842-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : compression
-
-name         : lzo-rle
-driver       : lzo-rle-scomp
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : scomp
-
-name         : lzo-rle
-driver       : lzo-rle-generic
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : compression
-
-name         : lzo
-driver       : lzo-scomp
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : scomp
-
-name         : lzo
-driver       : lzo-generic
-module       : kernel
-priority     : 0
-refcnt       : 9
-selftest     : passed
-internal     : no
-type         : compression
-
-name         : crct10dif
-driver       : crct10dif-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 2
-
-name         : crc32c
-driver       : crc32c-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 4
-
-name         : zlib-deflate
-driver       : zlib-deflate-scomp
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : scomp
-
-name         : deflate
-driver       : deflate-scomp
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : scomp
-
-name         : deflate
-driver       : deflate-generic
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : compression
-
-name         : aes
-driver       : aes-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : cipher
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-
-name         : sha224
-driver       : sha224-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 64
-digestsize   : 28
-
-name         : sha256
-driver       : sha256-generic
-module       : kernel
-priority     : 100
-refcnt       : 11
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 64
-digestsize   : 32
-
-name         : sha1
-driver       : sha1-generic
-module       : kernel
-priority     : 100
-refcnt       : 11
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 64
-digestsize   : 20
-
-name         : md5
-driver       : md5-generic
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 64
-digestsize   : 16
-
-name         : ecb(cipher_null)
-driver       : ecb-cipher_null
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : skcipher
-async        : no
-blocksize    : 1
-min keysize  : 0
-max keysize  : 0
-ivsize       : 0
-chunksize    : 1
-walksize     : 1
-
-name         : digest_null
-driver       : digest_null-generic
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : shash
-blocksize    : 1
-digestsize   : 0
-
-name         : compress_null
-driver       : compress_null-generic
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : compression
-
-name         : cipher_null
-driver       : cipher_null-generic
-module       : kernel
-priority     : 0
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : cipher
-blocksize    : 1
-min keysize  : 0
-max keysize  : 0
-
-name         : rsa
-driver       : rsa-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : akcipher
-
-name         : dh
-driver       : dh-generic
-module       : kernel
-priority     : 100
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : kpp
-
-name         : aes
-driver       : aes-asm
-module       : kernel
-priority     : 200
-refcnt       : 1
-selftest     : passed
-internal     : no
-type         : cipher
-blocksize    : 16
-min keysize  : 16
-max keysize  : 32
-
-Mode: 444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/diskstats
-Lines: 52
-   1       0 ram0 0 0 0 0 0 0 0 0 0 0 0
-   1       1 ram1 0 0 0 0 0 0 0 0 0 0 0
-   1       2 ram2 0 0 0 0 0 0 0 0 0 0 0
-   1       3 ram3 0 0 0 0 0 0 0 0 0 0 0
-   1       4 ram4 0 0 0 0 0 0 0 0 0 0 0
-   1       5 ram5 0 0 0 0 0 0 0 0 0 0 0
-   1       6 ram6 0 0 0 0 0 0 0 0 0 0 0
-   1       7 ram7 0 0 0 0 0 0 0 0 0 0 0
-   1       8 ram8 0 0 0 0 0 0 0 0 0 0 0
-   1       9 ram9 0 0 0 0 0 0 0 0 0 0 0
-   1      10 ram10 0 0 0 0 0 0 0 0 0 0 0
-   1      11 ram11 0 0 0 0 0 0 0 0 0 0 0
-   1      12 ram12 0 0 0 0 0 0 0 0 0 0 0
-   1      13 ram13 0 0 0 0 0 0 0 0 0 0 0
-   1      14 ram14 0 0 0 0 0 0 0 0 0 0 0
-   1      15 ram15 0 0 0 0 0 0 0 0 0 0 0
-   7       0 loop0 0 0 0 0 0 0 0 0 0 0 0
-   7       1 loop1 0 0 0 0 0 0 0 0 0 0 0
-   7       2 loop2 0 0 0 0 0 0 0 0 0 0 0
-   7       3 loop3 0 0 0 0 0 0 0 0 0 0 0
-   7       4 loop4 0 0 0 0 0 0 0 0 0 0 0
-   7       5 loop5 0 0 0 0 0 0 0 0 0 0 0
-   7       6 loop6 0 0 0 0 0 0 0 0 0 0 0
-   7       7 loop7 0 0 0 0 0 0 0 0 0 0 0
-   8       0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804
-   8       1 sda1 250 0 2000 36 0 0 0 0 0 36 36
-   8       2 sda2 246 0 1968 32 0 0 0 0 0 32 32
-   8       3 sda3 340 13 2818 52 11 8 152 8 0 56 60
-   8       4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428
- 252       0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256
- 252       1 dm-1 388 0 3104 84 74 0 592 0 0 76 84
- 252       2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416
- 252       3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104
- 252       4 dm-4 392 0 1034 28 38 0 137 16 0 24 44
- 252       5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632
- 179       0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156
- 179       1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24
- 179       2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68
-   2       0 fd0 2 0 16 80 0 0 0 0 0 80 80
- 254       0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228
- 254       1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720
- 254       2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992
-  11       0 sr0 0 0 0 0 0 0 0 0 0 0 0
- 259       0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546
- 259       1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16
- 259       2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970
-   8       0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130
-   8       1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0
-   8       2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130
-   8       0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182
-   8       1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0
-   8       2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/fscache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/fscache/stats
-Lines: 24
-FS-Cache statistics
-Cookies: idx=3 dat=67877 spc=0
-Objects: alc=67473 nal=0 avl=67473 ded=388
-ChkAux : non=12 ok=33 upd=44 obs=55
-Pages  : mrk=547164 unc=364577
-Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26
-Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85
-Invals : n=14 run=13
-Updates: n=7 nul=3 run=8
-Relinqs: n=394 nul=1 wcr=2 rtr=3
-AttrChg: n=6 ok=5 nbf=4 oom=3 run=2
-Allocs : n=20 ok=19 wt=18 nbf=17 int=16
-Allocs : ops=15 owt=14 abt=13
-Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43
-Retrvls: ops=151959 owt=42747 abt=44
-Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14
-Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43
-VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66
-Ops    : pend=42753 run=221129 enq=628798 can=11 rej=88
-Ops    : ini=377538 dfr=27 rel=377538 gc=37
-CacheOp: alo=1 luo=2 luc=3 gro=4
-CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10
-CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17
-CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/xfs/stat
-Lines: 23
-extent_alloc 92447 97589 92448 93751
-abt 0 0 0 0
-blk_map 1767055 188820 184891 92447 92448 2140766 0
-bmbt 0 0 0 0
-dir 185039 92447 92444 136422
-trans 706 944304 0
-ig 185045 58807 0 126238 0 33637 22
-log 2883 113448 9 17360 739
-push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
-xstrat 92447 0
-rw 107739 94045
-attr 4 0 0 0
-icluster 8677 7849 135802
-vnodes 92601 0 0 0 92444 92444 92444 0
-buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
-abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
-abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
-bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
-fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-qm 0 0 0 0 0 0 0 0
-xpc 399724544 92823103 86219234
-debug 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/loadavg
-Lines: 1
-0.02 0.04 0.05 1/497 11947
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/mdstat
-Lines: 60
-Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
-
-md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S)
-      5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
-
-md127 : active raid1 sdi2[0] sdj2[1]
-      312319552 blocks [2/2] [UU]
-
-md0 : active raid1 sdi1[0] sdj1[1]
-      248896 blocks [2/2] [UU]
-
-md4 : inactive raid1 sda3[0](F) sdb3[1](S)
-      4883648 blocks [2/2] [UU]
-
-md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0]
-      195310144 blocks [2/1] [U_]
-      [=>...................]  recovery =  8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S)
-      195310144 blocks [2/2] [UU]
-      [=>...................]  resync =  8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md201 : active raid1 sda3[0] sdb3[1]
-      1993728 blocks super 1.2 [2/2] [UU]
-      [=>...................]  check =  5.7% (114176/1993728) finish=0.2min speed=114176K/sec
-
-md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F)
-      7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
-      bitmap: 0/30 pages [0KB], 65536KB chunk
-
-md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S)
-      523968 blocks super 1.2 [4/4] [UUUU]
-      resync=DELAYED
-
-md10 : active raid0 sda1[0] sdb1[1]
-       314159265 blocks 64k chunks
-
-md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S)
-      4190208 blocks super 1.2 [2/2] [UU]
-      resync=PENDING
-
-md12 : active raid0 sdc2[0] sdd2[1]
-      3886394368 blocks super 1.2 512k chunks
-
-md126 : active raid0 sdb[1] sdc[0]
-      1855870976 blocks super external:/md127/0 128k chunks
-
-md219 : inactive sdb[2](S) sdc[1](S) sda[0](S)
-        7932 blocks super external:imsm
-
-md00 : active raid0 xvdb[0]
-      4186624 blocks super 1.2 256k chunks
-
-md120 : active linear sda1[1] sdb1[0]
-        2095104 blocks super 1.2 0k rounding
-
-md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0]
-      322560 blocks super 1.2 512k chunks
-
-unused devices: <none>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/meminfo
-Lines: 42
-MemTotal:       15666184 kB
-MemFree:          440324 kB
-Buffers:         1020128 kB
-Cached:         12007640 kB
-SwapCached:            0 kB
-Active:          6761276 kB
-Inactive:        6532708 kB
-Active(anon):     267256 kB
-Inactive(anon):      268 kB
-Active(file):    6494020 kB
-Inactive(file):  6532440 kB
-Unevictable:           0 kB
-Mlocked:               0 kB
-SwapTotal:             0 kB
-SwapFree:              0 kB
-Dirty:               768 kB
-Writeback:             0 kB
-AnonPages:        266216 kB
-Mapped:            44204 kB
-Shmem:              1308 kB
-Slab:            1807264 kB
-SReclaimable:    1738124 kB
-SUnreclaim:        69140 kB
-KernelStack:        1616 kB
-PageTables:         5288 kB
-NFS_Unstable:          0 kB
-Bounce:                0 kB
-WritebackTmp:          0 kB
-CommitLimit:     7833092 kB
-Committed_AS:     530844 kB
-VmallocTotal:   34359738367 kB
-VmallocUsed:       36596 kB
-VmallocChunk:   34359637840 kB
-HardwareCorrupted:     0 kB
-AnonHugePages:     12288 kB
-HugePages_Total:       0
-HugePages_Free:        0
-HugePages_Rsvd:        0
-HugePages_Surp:        0
-Hugepagesize:       2048 kB
-DirectMap4k:       91136 kB
-DirectMap2M:    16039936 kB
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/arp
-Lines: 2
-IP address       HW type     Flags       HW address            Mask     Device
-192.168.224.1    0x1         0x2         00:50:56:c0:00:08     *        ens33
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/dev
-Lines: 6
-Inter-|   Receive                                                |  Transmit
- face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
-vethf345468:     648       8    0    0    0     0          0         0      438       5    0    0    0     0       0          0
-    lo: 1664039048 1566805    0    0    0     0          0         0 1664039048 1566805    0    0    0     0       0          0
-docker0:    2568      38    0    0    0     0          0         0      438       5    0    0    0     0       0          0
-  eth0: 874354587 1036395    0    0    0     0          0         0 563352563  732147    0    0    0     0       0          0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs
-Lines: 21
-IP Virtual Server version 1.2.1 (size=4096)
-Prot LocalAddress:Port Scheduler Flags
-  -> RemoteAddress:Port Forward Weight ActiveConn InActConn
-TCP  C0A80016:0CEA wlc
-  -> C0A85216:0CEA      Tunnel  100    248        2
-  -> C0A85318:0CEA      Tunnel  100    248        2
-  -> C0A85315:0CEA      Tunnel  100    248        1
-TCP  C0A80039:0CEA wlc
-  -> C0A85416:0CEA      Tunnel  0      0          0
-  -> C0A85215:0CEA      Tunnel  100    1499       0
-  -> C0A83215:0CEA      Tunnel  100    1498       0
-TCP  C0A80037:0CEA wlc
-  -> C0A8321A:0CEA      Tunnel  0      0          0
-  -> C0A83120:0CEA      Tunnel  100    0          0
-TCP  [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
-  -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050      Route   1      0          0
-  -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050      Route   1      0          0
-  -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050      Route   1      1          1
-FWM  10001000 wlc
-  -> C0A8321A:0CEA      Route   0      0          1
-  -> C0A83215:0CEA      Route   0      0          2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs_stats
-Lines: 6
-   Total Incoming Outgoing         Incoming         Outgoing
-   Conns  Packets  Packets            Bytes            Bytes
- 16AA370 E33656E5        0     51D8C8883AB3                0
-
- Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s
-       4    1FB3C        0          1282A8F                0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/protocols
-Lines: 14
-protocol  size sockets  memory press maxhdr  slab module     cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
-PACKET    1344      2      -1   NI       0   no   kernel      n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n
-PINGv6    1112      0      -1   NI       0   yes  kernel      y  y  y  n  n  y  n  n  y  y  y  y  n  y  y  y  y  y  n
-RAWv6     1112      1      -1   NI       0   yes  kernel      y  y  y  n  y  y  y  n  y  y  y  y  n  y  y  y  y  n  n
-UDPLITEv6 1216      0      57   NI       0   yes  kernel      y  y  y  n  y  y  y  n  y  y  y  y  n  n  n  y  y  y  n
-UDPv6     1216     10      57   NI       0   yes  kernel      y  y  y  n  y  y  y  n  y  y  y  y  n  n  n  y  y  y  n
-TCPv6     2144   1937  1225378   no     320   yes  kernel      y  y  y  y  y  y  y  y  y  y  y  y  y  n  y  y  y  y  y
-UNIX      1024    120      -1   NI       0   yes  kernel      n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n
-UDP-Lite  1024      0      57   NI       0   yes  kernel      y  y  y  n  y  y  y  n  y  y  y  y  y  n  n  y  y  y  n
-PING       904      0      -1   NI       0   yes  kernel      y  y  y  n  n  y  n  n  y  y  y  y  n  y  y  y  y  y  n
-RAW        912      0      -1   NI       0   yes  kernel      y  y  y  n  y  y  y  n  y  y  y  y  n  y  y  y  y  n  n
-UDP       1024     73      57   NI       0   yes  kernel      y  y  y  n  y  y  y  n  y  y  y  y  y  n  n  y  y  y  n
-TCP       1984  93064  1225378   yes     320   yes  kernel      y  y  y  y  y  y  y  y  y  y  y  y  y  n  y  y  y  y  y
-NETLINK   1040     16      -1   NI       0   no   kernel      n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n  n
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net/rpc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfs
-Lines: 5
-net 18628 0 18628 6
-rpc 4329785 0 4338291
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
-proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfsd
-Lines: 11
-rc 0 6 18622
-fh 0 0 0 0 0
-io 157286400 0
-th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
-ra 32 0 0 0 0 0 0 0 0 0 0 0
-net 18628 0 18628 6
-rpc 18628 0 0 0 0
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
-proc4 2 2 10853
-proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat
-Lines: 6
-sockets: used 1602
-TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22
-UDP: inuse 12 mem 62
-UDPLITE: inuse 0
-RAW: inuse 0
-FRAG: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat6
-Lines: 5
-TCP6: inuse 17
-UDP6: inuse 9
-UDPLITE6: inuse 0
-RAW6: inuse 1
-FRAG6: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat
-Lines: 2
-00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
-01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat.broken
-Lines: 1
-00015c73 00020e76 F0000769 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp
-Lines: 4
-  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
-   0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-   1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-   2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp6
-Lines: 3
-  sl  local_address                         remote_address                        st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000   981        0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000  1000        0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp
-Lines: 4
-  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
-   0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-   1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-   2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp6
-Lines: 3
-  sl  local_address                         remote_address                        st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000   981        0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000  1000        0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp_broken
-Lines: 2
-  sl  local_address rem_address   st
-   1: 00000000:0016 00000000:0000 0A
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix
-Lines: 6
-Num       RefCount Protocol Flags    Type St Inode Path
-0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03 5091797
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix_without_inode
-Lines: 6
-Num       RefCount Protocol Flags    Type St Path
-0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/xfrm_stat
-Lines: 28
-XfrmInError                     1
-XfrmInBufferError               2
-XfrmInHdrError                  4
-XfrmInNoStates                  3
-XfrmInStateProtoError           40
-XfrmInStateModeError            100
-XfrmInStateSeqError             6000
-XfrmInStateExpired              4
-XfrmInStateMismatch             23451
-XfrmInStateInvalid              55555
-XfrmInTmplMismatch              51
-XfrmInNoPols                    65432
-XfrmInPolBlock                  100
-XfrmInPolError                  10000
-XfrmOutError                    1000000
-XfrmOutBundleGenError           43321
-XfrmOutBundleCheckError         555
-XfrmOutNoStates                 869
-XfrmOutStateProtoError          4542
-XfrmOutStateModeError           4
-XfrmOutStateSeqError            543
-XfrmOutStateExpired             565
-XfrmOutPolBlock                 43456
-XfrmOutPolDead                  7656
-XfrmOutPolError                 1454
-XfrmFwdHdrError                 6654
-XfrmOutStateInvalid             28765
-XfrmAcquireError                24532
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/pressure
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/cpu
-Lines: 1
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/io
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/memory
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/schedstat
-Lines: 6
-version 15
-timestamp 15819019232
-cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306
-domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0
-cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945
-domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/self
-SymlinkTo: 26231
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/slabinfo
-Lines: 302
-slabinfo - version: 2.1
-# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
-pid_3                375    532    576   28    4 : tunables    0    0    0 : slabdata     19     19      0
-pid_2                  3     28    576   28    4 : tunables    0    0    0 : slabdata      1      1      0
-nvidia_p2p_page_cache      0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-nvidia_pte_cache    9022   9152    368   22    2 : tunables    0    0    0 : slabdata    416    416      0
-nvidia_stack_cache    321    326  12624    2    8 : tunables    0    0    0 : slabdata    163    163      0
-kvm_async_pf           0      0    472   34    4 : tunables    0    0    0 : slabdata      0      0      0
-kvm_vcpu               0      0  15552    2    8 : tunables    0    0    0 : slabdata      0      0      0
-kvm_mmu_page_header      0      0    504   32    4 : tunables    0    0    0 : slabdata      0      0      0
-pte_list_desc          0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-x86_emulator           0      0   3024   10    8 : tunables    0    0    0 : slabdata      0      0      0
-x86_fpu                0      0   4608    7    8 : tunables    0    0    0 : slabdata      0      0      0
-iwl_cmd_pool:0000:04:00.0      0    128    512   32    4 : tunables    0    0    0 : slabdata      4      4      0
-ext4_groupinfo_4k   3719   3740    480   34    4 : tunables    0    0    0 : slabdata    110    110      0
-bio-6                 32     75    640   25    4 : tunables    0    0    0 : slabdata      3      3      0
-bio-5                 16     48   1344   24    8 : tunables    0    0    0 : slabdata      2      2      0
-bio-4                 17     92   1408   23    8 : tunables    0    0    0 : slabdata      4      4      0
-fat_inode_cache        0      0   1056   31    8 : tunables    0    0    0 : slabdata      0      0      0
-fat_cache              0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-ovl_aio_req            0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-ovl_inode              0      0   1000   32    8 : tunables    0    0    0 : slabdata      0      0      0
-squashfs_inode_cache      0      0   1088   30    8 : tunables    0    0    0 : slabdata      0      0      0
-fuse_request           0      0    472   34    4 : tunables    0    0    0 : slabdata      0      0      0
-fuse_inode             0      0   1152   28    8 : tunables    0    0    0 : slabdata      0      0      0
-xfs_dqtrx              0      0    864   37    8 : tunables    0    0    0 : slabdata      0      0      0
-xfs_dquot              0      0    832   39    8 : tunables    0    0    0 : slabdata      0      0      0
-xfs_buf                0      0    768   21    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_bui_item           0      0    544   30    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_bud_item           0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_cui_item           0      0    768   21    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_cud_item           0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_rui_item           0      0   1024   32    8 : tunables    0    0    0 : slabdata      0      0      0
-xfs_rud_item           0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_icr                0      0    520   31    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_ili                0      0    528   31    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_inode              0      0   1344   24    8 : tunables    0    0    0 : slabdata      0      0      0
-xfs_efi_item           0      0    768   21    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_efd_item           0      0    776   21    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_buf_item           0      0    608   26    4 : tunables    0    0    0 : slabdata      0      0      0
-xf_trans               0      0    568   28    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_ifork              0      0    376   21    2 : tunables    0    0    0 : slabdata      0      0      0
-xfs_da_state           0      0    816   20    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_btree_cur          0      0    560   29    4 : tunables    0    0    0 : slabdata      0      0      0
-xfs_bmap_free_item      0      0    400   20    2 : tunables    0    0    0 : slabdata      0      0      0
-xfs_log_ticket         0      0    520   31    4 : tunables    0    0    0 : slabdata      0      0      0
-nfs_direct_cache       0      0    560   29    4 : tunables    0    0    0 : slabdata      0      0      0
-nfs_commit_data        4     28   1152   28    8 : tunables    0    0    0 : slabdata      1      1      0
-nfs_write_data        32     50   1280   25    8 : tunables    0    0    0 : slabdata      2      2      0
-nfs_read_data          0      0   1280   25    8 : tunables    0    0    0 : slabdata      0      0      0
-nfs_inode_cache        0      0   1408   23    8 : tunables    0    0    0 : slabdata      0      0      0
-nfs_page               0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-rpc_inode_cache        0      0   1024   32    8 : tunables    0    0    0 : slabdata      0      0      0
-rpc_buffers            8     13   2496   13    8 : tunables    0    0    0 : slabdata      1      1      0
-rpc_tasks              8     25    640   25    4 : tunables    0    0    0 : slabdata      1      1      0
-fscache_cookie_jar      1     35    464   35    4 : tunables    0    0    0 : slabdata      1      1      0
-jfs_mp                32     35    464   35    4 : tunables    0    0    0 : slabdata      1      1      0
-jfs_ip                 0      0   1592   20    8 : tunables    0    0    0 : slabdata      0      0      0
-reiser_inode_cache      0      0   1096   29    8 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_end_io_wq        0      0    464   35    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_prelim_ref       0      0    424   38    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_delayed_extent_op      0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_delayed_data_ref      0      0    448   36    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_delayed_tree_ref      0      0    440   37    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_delayed_ref_head      0      0    480   34    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_inode_defrag      0      0    400   20    2 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_delayed_node      0      0    648   25    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_ordered_extent      0      0    752   21    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_extent_map       0      0    480   34    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_extent_state      0      0    416   39    4 : tunables    0    0    0 : slabdata      0      0      0
-bio-3                 35     92    704   23    4 : tunables    0    0    0 : slabdata      4      4      0
-btrfs_extent_buffer      0      0    600   27    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_free_space_bitmap      0      0  12288    2    8 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_free_space       0      0    416   39    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_path             0      0    448   36    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_trans_handle      0      0    440   37    4 : tunables    0    0    0 : slabdata      0      0      0
-btrfs_inode            0      0   1496   21    8 : tunables    0    0    0 : slabdata      0      0      0
-ext4_inode_cache   84136  84755   1400   23    8 : tunables    0    0    0 : slabdata   3685   3685      0
-ext4_free_data        22     80    392   20    2 : tunables    0    0    0 : slabdata      4      4      0
-ext4_allocation_context      0     70    464   35    4 : tunables    0    0    0 : slabdata      2      2      0
-ext4_prealloc_space     24     74    440   37    4 : tunables    0    0    0 : slabdata      2      2      0
-ext4_system_zone     267    273    376   21    2 : tunables    0    0    0 : slabdata     13     13      0
-ext4_io_end_vec        0     88    368   22    2 : tunables    0    0    0 : slabdata      4      4      0
-ext4_io_end            0     80    400   20    2 : tunables    0    0    0 : slabdata      4      4      0
-ext4_bio_post_read_ctx    128    147    384   21    2 : tunables    0    0    0 : slabdata      7      7      0
-ext4_pending_reservation      0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-ext4_extent_status  79351  79422    376   21    2 : tunables    0    0    0 : slabdata   3782   3782      0
-jbd2_transaction_s     44    100    640   25    4 : tunables    0    0    0 : slabdata      4      4      0
-jbd2_inode          6785   6840    400   20    2 : tunables    0    0    0 : slabdata    342    342      0
-jbd2_journal_handle      0     80    392   20    2 : tunables    0    0    0 : slabdata      4      4      0
-jbd2_journal_head    824   1944    448   36    4 : tunables    0    0    0 : slabdata     54     54      0
-jbd2_revoke_table_s      4     23    352   23    2 : tunables    0    0    0 : slabdata      1      1      0
-jbd2_revoke_record_s      0    156    416   39    4 : tunables    0    0    0 : slabdata      4      4      0
-ext2_inode_cache       0      0   1144   28    8 : tunables    0    0    0 : slabdata      0      0      0
-mbcache                0      0    392   20    2 : tunables    0    0    0 : slabdata      0      0      0
-dm_thin_new_mapping      0    152    424   38    4 : tunables    0    0    0 : slabdata      4      4      0
-dm_snap_pending_exception      0      0    464   35    4 : tunables    0    0    0 : slabdata      0      0      0
-dm_exception           0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-dm_dirty_log_flush_entry      0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-dm_bio_prison_cell_v2      0      0    432   37    4 : tunables    0    0    0 : slabdata      0      0      0
-dm_bio_prison_cell      0    148    432   37    4 : tunables    0    0    0 : slabdata      4      4      0
-kcopyd_job             0      8   3648    8    8 : tunables    0    0    0 : slabdata      1      1      0
-io                     0     32    512   32    4 : tunables    0    0    0 : slabdata      1      1      0
-dm_uevent              0      0   3224   10    8 : tunables    0    0    0 : slabdata      0      0      0
-dax_cache              1     28   1152   28    8 : tunables    0    0    0 : slabdata      1      1      0
-aic94xx_ascb           0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-aic94xx_dma_token      0      0    384   21    2 : tunables    0    0    0 : slabdata      0      0      0
-asd_sas_event          0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-sas_task               0      0    704   23    4 : tunables    0    0    0 : slabdata      0      0      0
-qla2xxx_srbs           0      0    832   39    8 : tunables    0    0    0 : slabdata      0      0      0
-sd_ext_cdb             2     22    368   22    2 : tunables    0    0    0 : slabdata      1      1      0
-scsi_sense_cache     258    288    512   32    4 : tunables    0    0    0 : slabdata      9      9      0
-virtio_scsi_cmd       64     75    640   25    4 : tunables    0    0    0 : slabdata      3      3      0
-L2TP/IPv6              0      0   1536   21    8 : tunables    0    0    0 : slabdata      0      0      0
-L2TP/IP                0      0   1408   23    8 : tunables    0    0    0 : slabdata      0      0      0
-ip6-frags              0      0    520   31    4 : tunables    0    0    0 : slabdata      0      0      0
-fib6_nodes             5     32    512   32    4 : tunables    0    0    0 : slabdata      1      1      0
-ip6_dst_cache          4     25    640   25    4 : tunables    0    0    0 : slabdata      1      1      0
-ip6_mrt_cache          0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-PINGv6                 0      0   1600   20    8 : tunables    0    0    0 : slabdata      0      0      0
-RAWv6                 25     40   1600   20    8 : tunables    0    0    0 : slabdata      2      2      0
-UDPLITEv6              0      0   1728   18    8 : tunables    0    0    0 : slabdata      0      0      0
-UDPv6                  3     54   1728   18    8 : tunables    0    0    0 : slabdata      3      3      0
-tw_sock_TCPv6          0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-request_sock_TCPv6      0      0    632   25    4 : tunables    0    0    0 : slabdata      0      0      0
-TCPv6                  0     33   2752   11    8 : tunables    0    0    0 : slabdata      3      3      0
-uhci_urb_priv          0      0    392   20    2 : tunables    0    0    0 : slabdata      0      0      0
-sgpool-128             2     14   4544    7    8 : tunables    0    0    0 : slabdata      2      2      0
-sgpool-64              2     13   2496   13    8 : tunables    0    0    0 : slabdata      1      1      0
-sgpool-32              2     44   1472   22    8 : tunables    0    0    0 : slabdata      2      2      0
-sgpool-16              2     68    960   34    8 : tunables    0    0    0 : slabdata      2      2      0
-sgpool-8               2     46    704   23    4 : tunables    0    0    0 : slabdata      2      2      0
-btree_node             0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-bfq_io_cq              0      0    488   33    4 : tunables    0    0    0 : slabdata      0      0      0
-bfq_queue              0      0    848   38    8 : tunables    0    0    0 : slabdata      0      0      0
-mqueue_inode_cache      1     24   1344   24    8 : tunables    0    0    0 : slabdata      1      1      0
-isofs_inode_cache      0      0    968   33    8 : tunables    0    0    0 : slabdata      0      0      0
-io_kiocb               0      0    640   25    4 : tunables    0    0    0 : slabdata      0      0      0
-kioctx                 0     30   1088   30    8 : tunables    0    0    0 : slabdata      1      1      0
-aio_kiocb              0     28    576   28    4 : tunables    0    0    0 : slabdata      1      1      0
-userfaultfd_ctx_cache      0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-fanotify_path_event      0      0    392   20    2 : tunables    0    0    0 : slabdata      0      0      0
-fanotify_fid_event      0      0    400   20    2 : tunables    0    0    0 : slabdata      0      0      0
-fsnotify_mark          0      0    408   20    2 : tunables    0    0    0 : slabdata      0      0      0
-dnotify_mark           0      0    416   39    4 : tunables    0    0    0 : slabdata      0      0      0
-dnotify_struct         0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-dio                    0      0   1088   30    8 : tunables    0    0    0 : slabdata      0      0      0
-bio-2                  4     25    640   25    4 : tunables    0    0    0 : slabdata      1      1      0
-fasync_cache           0      0    384   21    2 : tunables    0    0    0 : slabdata      0      0      0
-audit_tree_mark        0      0    416   39    4 : tunables    0    0    0 : slabdata      0      0      0
-pid_namespace         30     34    480   34    4 : tunables    0    0    0 : slabdata      1      1      0
-posix_timers_cache      0     27    592   27    4 : tunables    0    0    0 : slabdata      1      1      0
-iommu_devinfo         24     32    512   32    4 : tunables    0    0    0 : slabdata      1      1      0
-iommu_domain          10     10   3264   10    8 : tunables    0    0    0 : slabdata      1      1      0
-iommu_iova          8682   8748    448   36    4 : tunables    0    0    0 : slabdata    243    243      0
-UNIX                 529    814   1472   22    8 : tunables    0    0    0 : slabdata     37     37      0
-ip4-frags              0      0    536   30    4 : tunables    0    0    0 : slabdata      0      0      0
-ip_mrt_cache           0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-UDP-Lite               0      0   1536   21    8 : tunables    0    0    0 : slabdata      0      0      0
-tcp_bind_bucket        7    128    512   32    4 : tunables    0    0    0 : slabdata      4      4      0
-inet_peer_cache        0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-xfrm_dst_cache         0      0    704   23    4 : tunables    0    0    0 : slabdata      0      0      0
-xfrm_state             0      0   1152   28    8 : tunables    0    0    0 : slabdata      0      0      0
-ip_fib_trie            7     21    384   21    2 : tunables    0    0    0 : slabdata      1      1      0
-ip_fib_alias           9     20    392   20    2 : tunables    0    0    0 : slabdata      1      1      0
-ip_dst_cache          27     84    576   28    4 : tunables    0    0    0 : slabdata      3      3      0
-PING                   0      0   1408   23    8 : tunables    0    0    0 : slabdata      0      0      0
-RAW                   32     46   1408   23    8 : tunables    0    0    0 : slabdata      2      2      0
-UDP                   11    168   1536   21    8 : tunables    0    0    0 : slabdata      8      8      0
-tw_sock_TCP            1     56    576   28    4 : tunables    0    0    0 : slabdata      2      2      0
-request_sock_TCP       0     25    632   25    4 : tunables    0    0    0 : slabdata      1      1      0
-TCP                   10     60   2624   12    8 : tunables    0    0    0 : slabdata      5      5      0
-hugetlbfs_inode_cache      2     35    928   35    8 : tunables    0    0    0 : slabdata      1      1      0
-dquot                  0      0    640   25    4 : tunables    0    0    0 : slabdata      0      0      0
-bio-1                 32     46    704   23    4 : tunables    0    0    0 : slabdata      2      2      0
-eventpoll_pwq        409    600    408   20    2 : tunables    0    0    0 : slabdata     30     30      0
-eventpoll_epi        408    672    576   28    4 : tunables    0    0    0 : slabdata     24     24      0
-inotify_inode_mark     58    195    416   39    4 : tunables    0    0    0 : slabdata      5      5      0
-scsi_data_buffer       0      0    360   22    2 : tunables    0    0    0 : slabdata      0      0      0
-bio_crypt_ctx        128    147    376   21    2 : tunables    0    0    0 : slabdata      7      7      0
-request_queue         29     39   2408   13    8 : tunables    0    0    0 : slabdata      3      3      0
-blkdev_ioc            81    148    440   37    4 : tunables    0    0    0 : slabdata      4      4      0
-bio-0                125    200    640   25    4 : tunables    0    0    0 : slabdata      8      8      0
-biovec-max           166    196   4544    7    8 : tunables    0    0    0 : slabdata     28     28      0
-biovec-128             0     52   2496   13    8 : tunables    0    0    0 : slabdata      4      4      0
-biovec-64              0     88   1472   22    8 : tunables    0    0    0 : slabdata      4      4      0
-biovec-16              0     92    704   23    4 : tunables    0    0    0 : slabdata      4      4      0
-bio_integrity_payload      4     28    576   28    4 : tunables    0    0    0 : slabdata      1      1      0
-khugepaged_mm_slot     59    180    448   36    4 : tunables    0    0    0 : slabdata      5      5      0
-ksm_mm_slot            0      0    384   21    2 : tunables    0    0    0 : slabdata      0      0      0
-ksm_stable_node        0      0    400   20    2 : tunables    0    0    0 : slabdata      0      0      0
-ksm_rmap_item          0      0    400   20    2 : tunables    0    0    0 : slabdata      0      0      0
-user_namespace         2     37    864   37    8 : tunables    0    0    0 : slabdata      1      1      0
-uid_cache              5     28    576   28    4 : tunables    0    0    0 : slabdata      1      1      0
-dmaengine-unmap-256      1     13   2496   13    8 : tunables    0    0    0 : slabdata      1      1      0
-dmaengine-unmap-128      1     22   1472   22    8 : tunables    0    0    0 : slabdata      1      1      0
-dmaengine-unmap-16      1     28    576   28    4 : tunables    0    0    0 : slabdata      1      1      0
-dmaengine-unmap-2      1     36    448   36    4 : tunables    0    0    0 : slabdata      1      1      0
-audit_buffer           0     22    360   22    2 : tunables    0    0    0 : slabdata      1      1      0
-sock_inode_cache     663   1170   1216   26    8 : tunables    0    0    0 : slabdata     45     45      0
-skbuff_ext_cache       0      0    576   28    4 : tunables    0    0    0 : slabdata      0      0      0
-skbuff_fclone_cache      1     72    896   36    8 : tunables    0    0    0 : slabdata      2      2      0
-skbuff_head_cache      3    650    640   25    4 : tunables    0    0    0 : slabdata     26     26      0
-configfs_dir_cache      7     38    424   38    4 : tunables    0    0    0 : slabdata      1      1      0
-file_lock_cache       27    116    552   29    4 : tunables    0    0    0 : slabdata      4      4      0
-file_lock_ctx        106    120    392   20    2 : tunables    0    0    0 : slabdata      6      6      0
-fsnotify_mark_connector     52     66    368   22    2 : tunables    0    0    0 : slabdata      3      3      0
-net_namespace          1      6   5312    6    8 : tunables    0    0    0 : slabdata      1      1      0
-task_delay_info      784   1560    416   39    4 : tunables    0    0    0 : slabdata     40     40      0
-taskstats             45     92    688   23    4 : tunables    0    0    0 : slabdata      4      4      0
-proc_dir_entry       678    682    528   31    4 : tunables    0    0    0 : slabdata     22     22      0
-pde_opener             0    189    376   21    2 : tunables    0    0    0 : slabdata      9      9      0
-proc_inode_cache    7150   8250    992   33    8 : tunables    0    0    0 : slabdata    250    250      0
-seq_file              60    735    456   35    4 : tunables    0    0    0 : slabdata     21     21      0
-sigqueue               0    156    416   39    4 : tunables    0    0    0 : slabdata      4      4      0
-bdev_cache            36     78   1216   26    8 : tunables    0    0    0 : slabdata      3      3      0
-shmem_inode_cache   1599   2208   1016   32    8 : tunables    0    0    0 : slabdata     69     69      0
-kernfs_iattrs_cache   1251   1254    424   38    4 : tunables    0    0    0 : slabdata     33     33      0
-kernfs_node_cache  52898  52920    464   35    4 : tunables    0    0    0 : slabdata   1512   1512      0
-mnt_cache             42     46    704   23    4 : tunables    0    0    0 : slabdata      2      2      0
-filp                4314   6371    704   23    4 : tunables    0    0    0 : slabdata    277    277      0
-inode_cache        28695  29505    920   35    8 : tunables    0    0    0 : slabdata    843    843      0
-dentry            166069 169074    528   31    4 : tunables    0    0    0 : slabdata   5454   5454      0
-names_cache            0     35   4544    7    8 : tunables    0    0    0 : slabdata      5      5      0
-hashtab_node           0      0    360   22    2 : tunables    0    0    0 : slabdata      0      0      0
-ebitmap_node           0      0    400   20    2 : tunables    0    0    0 : slabdata      0      0      0
-avtab_extended_perms      0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-avtab_node             0      0    360   22    2 : tunables    0    0    0 : slabdata      0      0      0
-avc_xperms_data        0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-avc_xperms_decision_node      0      0    384   21    2 : tunables    0    0    0 : slabdata      0      0      0
-avc_xperms_node        0      0    392   20    2 : tunables    0    0    0 : slabdata      0      0      0
-avc_node              37     40    408   20    2 : tunables    0    0    0 : slabdata      2      2      0
-iint_cache             0      0    448   36    4 : tunables    0    0    0 : slabdata      0      0      0
-lsm_inode_cache   122284 122340    392   20    2 : tunables    0    0    0 : slabdata   6117   6117      0
-lsm_file_cache      4266   4485    352   23    2 : tunables    0    0    0 : slabdata    195    195      0
-key_jar                8     25    640   25    4 : tunables    0    0    0 : slabdata      1      1      0
-buffer_head       255622 257076    440   37    4 : tunables    0    0    0 : slabdata   6948   6948      0
-uts_namespace          0      0    776   21    4 : tunables    0    0    0 : slabdata      0      0      0
-nsproxy               31     40    408   20    2 : tunables    0    0    0 : slabdata      2      2      0
-vm_area_struct     39115  43214    528   31    4 : tunables    0    0    0 : slabdata   1394   1394      0
-mm_struct             96    529   1408   23    8 : tunables    0    0    0 : slabdata     23     23      0
-fs_cache             102    756    448   36    4 : tunables    0    0    0 : slabdata     21     21      0
-files_cache          102    588   1152   28    8 : tunables    0    0    0 : slabdata     21     21      0
-signal_cache         266    672   1536   21    8 : tunables    0    0    0 : slabdata     32     32      0
-sighand_cache        266    507   2496   13    8 : tunables    0    0    0 : slabdata     39     39      0
-task_struct          783    963  10240    3    8 : tunables    0    0    0 : slabdata    321    321      0
-cred_jar             364    952    576   28    4 : tunables    0    0    0 : slabdata     34     34      0
-anon_vma_chain     63907  67821    416   39    4 : tunables    0    0    0 : slabdata   1739   1739      0
-anon_vma           25891  28899    416   39    4 : tunables    0    0    0 : slabdata    741    741      0
-pid                  408    992    512   32    4 : tunables    0    0    0 : slabdata     31     31      0
-Acpi-Operand        6682   6740    408   20    2 : tunables    0    0    0 : slabdata    337    337      0
-Acpi-ParseExt          0     39    416   39    4 : tunables    0    0    0 : slabdata      1      1      0
-Acpi-Parse             0     80    392   20    2 : tunables    0    0    0 : slabdata      4      4      0
-Acpi-State             0     78    416   39    4 : tunables    0    0    0 : slabdata      2      2      0
-Acpi-Namespace      3911   3948    384   21    2 : tunables    0    0    0 : slabdata    188    188      0
-trace_event_file    2638   2660    424   38    4 : tunables    0    0    0 : slabdata     70     70      0
-ftrace_event_field   6592   6594    384   21    2 : tunables    0    0    0 : slabdata    314    314      0
-pool_workqueue        41     64   1024   32    8 : tunables    0    0    0 : slabdata      2      2      0
-radix_tree_node    21638  24045    912   35    8 : tunables    0    0    0 : slabdata    687    687      0
-task_group            48     78   1216   26    8 : tunables    0    0    0 : slabdata      3      3      0
-vmap_area           4411   4680    400   20    2 : tunables    0    0    0 : slabdata    234    234      0
-dma-kmalloc-8k         0      0  24576    1    8 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-4k         0      0  12288    2    8 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-2k         0      0   6144    5    8 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-1k         0      0   3072   10    8 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-512        0      0   1536   21    8 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-256        0      0   1024   32    8 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-128        0      0    640   25    4 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-64         0      0    512   32    4 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-32         0      0    416   39    4 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-16         0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-8          0      0    344   23    2 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-192        0      0    528   31    4 : tunables    0    0    0 : slabdata      0      0      0
-dma-kmalloc-96         0      0    432   37    4 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-8k         0      0  24576    1    8 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-4k         0      0  12288    2    8 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-2k         0      0   6144    5    8 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-1k         0      0   3072   10    8 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-512        0      0   1536   21    8 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-256        0      0   1024   32    8 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-192        0      0    528   31    4 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-128       31     75    640   25    4 : tunables    0    0    0 : slabdata      3      3      0
-kmalloc-rcl-96      3371   3626    432   37    4 : tunables    0    0    0 : slabdata     98     98      0
-kmalloc-rcl-64      2080   2272    512   32    4 : tunables    0    0    0 : slabdata     71     71      0
-kmalloc-rcl-32         0      0    416   39    4 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-16         0      0    368   22    2 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-rcl-8          0      0    344   23    2 : tunables    0    0    0 : slabdata      0      0      0
-kmalloc-8k           133    140  24576    1    8 : tunables    0    0    0 : slabdata    140    140      0
-kmalloc-4k           403    444  12288    2    8 : tunables    0    0    0 : slabdata    222    222      0
-kmalloc-2k          2391   2585   6144    5    8 : tunables    0    0    0 : slabdata    517    517      0
-kmalloc-1k          2163   2420   3072   10    8 : tunables    0    0    0 : slabdata    242    242      0
-kmalloc-512         2972   3633   1536   21    8 : tunables    0    0    0 : slabdata    173    173      0
-kmalloc-256         1841   1856   1024   32    8 : tunables    0    0    0 : slabdata     58     58      0
-kmalloc-192         2165   2914    528   31    4 : tunables    0    0    0 : slabdata     94     94      0
-kmalloc-128         1137   1175    640   25    4 : tunables    0    0    0 : slabdata     47     47      0
-kmalloc-96          1925   2590    432   37    4 : tunables    0    0    0 : slabdata     70     70      0
-kmalloc-64          9433  10688    512   32    4 : tunables    0    0    0 : slabdata    334    334      0
-kmalloc-32          9098  10062    416   39    4 : tunables    0    0    0 : slabdata    258    258      0
-kmalloc-16         10914  10956    368   22    2 : tunables    0    0    0 : slabdata    498    498      0
-kmalloc-8           7576   7705    344   23    2 : tunables    0    0    0 : slabdata    335    335      0
-kmem_cache_node      904    928    512   32    4 : tunables    0    0    0 : slabdata     29     29      0
-kmem_cache           904    936    832   39    8 : tunables    0    0    0 : slabdata     24     24      0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/stat
-Lines: 16
-cpu  301854 612 111922 8979004 3552 2 3944 0 0 0
-cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
-cpu1 47869 23 16474 1110787 591 0 46 0 0 0
-cpu2 46504 36 15916 1112321 441 0 326 0 0 0
-cpu3 47054 102 15683 1113230 533 0 60 0 0 0
-cpu4 28413 25 10776 1140321 217 0 8 0 0 0
-cpu5 29271 101 11586 1136270 672 0 30 0 0 0
-cpu6 29152 36 10276 1139721 319 0 29 0 0 0
-cpu7 29098 268 10164 1139282 555 0 31 0 0 0
-intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ctxt 38014093
-btime 1418183276
-processes 26442
-procs_running 2
-procs_blocked 1
-softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/swaps
-Lines: 2
-Filename				Type		Size	Used	Priority
-/dev/dm-2                               partition	131068	176	-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/symlinktargets
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/README
-Lines: 2
-This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
-They are otherwise ignored by the tests
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/abc
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/def
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/ghi
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/uvw
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/xyz
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel/random
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/entropy_avail
-Lines: 1
-3943
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/poolsize
-Lines: 1
-4096
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold
-Lines: 1
-3072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/vm
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/admin_reserve_kbytes
-Lines: 1
-8192
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/block_dump
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/compact_unevictable_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_ratio
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_expire_centisecs
-Lines: 1
-3000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_ratio
-Lines: 1
-20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_writeback_centisecs
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirtytime_expire_seconds
-Lines: 1
-43200
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/drop_caches
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/extfrag_threshold
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/hugetlb_shm_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/laptop_mode
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/legacy_va_layout
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/lowmem_reserve_ratio
-Lines: 1
-256	256	32	0	0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/max_map_count
-Lines: 1
-65530
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_early_kill
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_recovery
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_free_kbytes
-Lines: 1
-67584
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_slab_ratio
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_unmapped_ratio
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/mmap_min_addr
-Lines: 1
-65536
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_overcommit_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_stat
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_zonelist_order
-Lines: 1
-Node
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_dump_tasks
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_kill_allocating_task
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_kbytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_memory
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_ratio
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/page-cluster
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/panic_on_oom
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/percpu_pagelist_fraction
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/stat_interval
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/swappiness
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/user_reserve_kbytes
-Lines: 1
-131072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/vfs_cache_pressure
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_boost_factor
-Lines: 1
-15000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_scale_factor
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/zone_reclaim_mode
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/zoneinfo
-Lines: 262
-Node 0, zone      DMA
-  per-node stats
-      nr_inactive_anon 230981
-      nr_active_anon 547580
-      nr_inactive_file 316904
-      nr_active_file 346282
-      nr_unevictable 115467
-      nr_slab_reclaimable 131220
-      nr_slab_unreclaimable 47320
-      nr_isolated_anon 0
-      nr_isolated_file 0
-      workingset_nodes 11627
-      workingset_refault 466886
-      workingset_activate 276925
-      workingset_restore 84055
-      workingset_nodereclaim 487
-      nr_anon_pages 795576
-      nr_mapped    215483
-      nr_file_pages 761874
-      nr_dirty     908
-      nr_writeback 0
-      nr_writeback_temp 0
-      nr_shmem     224925
-      nr_shmem_hugepages 0
-      nr_shmem_pmdmapped 0
-      nr_anon_transparent_hugepages 0
-      nr_unstable  0
-      nr_vmscan_write 12950
-      nr_vmscan_immediate_reclaim 3033
-      nr_dirtied   8007423
-      nr_written   7752121
-      nr_kernel_misc_reclaimable 0
-  pages free     3952
-        min      33
-        low      41
-        high     49
-        spanned  4095
-        present  3975
-        managed  3956
-        protection: (0, 2877, 7826, 7826, 7826)
-      nr_free_pages 3952
-      nr_zone_inactive_anon 0
-      nr_zone_active_anon 0
-      nr_zone_inactive_file 0
-      nr_zone_active_file 0
-      nr_zone_unevictable 0
-      nr_zone_write_pending 0
-      nr_mlock     0
-      nr_page_table_pages 0
-      nr_kernel_stack 0
-      nr_bounce    0
-      nr_zspages   0
-      nr_free_cma  0
-      numa_hit     1
-      numa_miss    0
-      numa_foreign 0
-      numa_interleave 0
-      numa_local   1
-      numa_other   0
-  pagesets
-    cpu: 0
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 1
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 2
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 3
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 4
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 5
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 6
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-    cpu: 7
-              count: 0
-              high:  0
-              batch: 1
-  vm stats threshold: 8
-  node_unreclaimable:  0
-  start_pfn:           1
-Node 0, zone    DMA32
-  pages free     204252
-        min      19510
-        low      21059
-        high     22608
-        spanned  1044480
-        present  759231
-        managed  742806
-        protection: (0, 0, 4949, 4949, 4949)
-      nr_free_pages 204252
-      nr_zone_inactive_anon 118558
-      nr_zone_active_anon 106598
-      nr_zone_inactive_file 75475
-      nr_zone_active_file 70293
-      nr_zone_unevictable 66195
-      nr_zone_write_pending 64
-      nr_mlock     4
-      nr_page_table_pages 1756
-      nr_kernel_stack 2208
-      nr_bounce    0
-      nr_zspages   0
-      nr_free_cma  0
-      numa_hit     113952967
-      numa_miss    0
-      numa_foreign 0
-      numa_interleave 0
-      numa_local   113952967
-      numa_other   0
-  pagesets
-    cpu: 0
-              count: 345
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 1
-              count: 356
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 2
-              count: 325
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 3
-              count: 346
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 4
-              count: 321
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 5
-              count: 316
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 6
-              count: 373
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-    cpu: 7
-              count: 339
-              high:  378
-              batch: 63
-  vm stats threshold: 48
-  node_unreclaimable:  0
-  start_pfn:           4096
-Node 0, zone   Normal
-  pages free     18553
-        min      11176
-        low      13842
-        high     16508
-        spanned  1308160
-        present  1308160
-        managed  1268711
-        protection: (0, 0, 0, 0, 0)
-      nr_free_pages 18553
-      nr_zone_inactive_anon 112423
-      nr_zone_active_anon 440982
-      nr_zone_inactive_file 241429
-      nr_zone_active_file 275989
-      nr_zone_unevictable 49272
-      nr_zone_write_pending 844
-      nr_mlock     154
-      nr_page_table_pages 9750
-      nr_kernel_stack 15136
-      nr_bounce    0
-      nr_zspages   0
-      nr_free_cma  0
-      numa_hit     162718019
-      numa_miss    0
-      numa_foreign 0
-      numa_interleave 26812
-      numa_local   162718019
-      numa_other   0
-  pagesets
-    cpu: 0
-              count: 316
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 1
-              count: 366
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 2
-              count: 60
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 3
-              count: 256
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 4
-              count: 253
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 5
-              count: 159
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 6
-              count: 311
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-    cpu: 7
-              count: 264
-              high:  378
-              batch: 63
-  vm stats threshold: 56
-  node_unreclaimable:  0
-  start_pfn:           1048576
-Node 0, zone  Movable
-  pages free     0
-        min      0
-        low      0
-        high     0
-        spanned  0
-        present  0
-        managed  0
-        protection: (0, 0, 0, 0, 0)
-Node 0, zone   Device
-  pages free     0
-        min      0
-        low      0
-        high     0
-        spanned  0
-        present  0
-        managed  0
-        protection: (0, 0, 0, 0, 0)
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/dm-0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/dm-0/stat
-Lines: 1
-6447303        0 710266738  1529043   953216        0 31201176  4557464        0   796160  6088971
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/add_random
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/chunk_sectors
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/dax
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_granularity
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_zeroes_data
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/fua
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/hw_sector_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll_delay
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_timeout
-Lines: 1
-30000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue/iosched
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_max
-Lines: 1
-16384
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async
-Lines: 1
-250
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/low_latency
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/max_budget
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle
-Lines: 1
-8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us
-Lines: 1
-8000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/timeout_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iostats
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/logical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_discard_segments
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb
-Lines: 1
-32767
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_integrity_segments
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_sectors_kb
-Lines: 1
-1280
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segment_size
-Lines: 1
-65536
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segments
-Lines: 1
-168
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/minimum_io_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nomerges
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_requests
-Lines: 1
-64
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_zones
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/optimal_io_size
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/physical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/read_ahead_kb
-Lines: 1
-128
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rotational
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rq_affinity
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/scheduler
-Lines: 1
-mq-deadline kyber [bfq] none
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/wbt_lat_usec
-Lines: 1
-75000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_cache
-Lines: 1
-write back
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_same_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/zoned
-Lines: 1
-none
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/stat
-Lines: 1
-9652963   396792 759304206   412943  8422549  6731723 286915323 13947418        0  5658367 19174573 1 2 3 12
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo
-Lines: 1
-30
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/fabric_name
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/node_name
-Lines: 1
-0x2000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_id
-Lines: 1
-0x000002
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_name
-Lines: 1
-0x1000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_state
-Lines: 1
-Online
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_type
-Lines: 1
-Point-To-Point (direct nport connection)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/speed
-Lines: 1
-16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames
-Lines: 1
-0xffffffffffffffff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/error_frames
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts
-Lines: 1
-0x13
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count
-Lines: 1
-0x2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count
-Lines: 1
-0x8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count
-Lines: 1
-0x9
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count
-Lines: 1
-0x11
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count
-Lines: 1
-0x10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/nos_count
-Lines: 1
-0x12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames
-Lines: 1
-0x3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_words
-Lines: 1
-0x4
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset
-Lines: 1
-0x7
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames
-Lines: 1
-0x5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_words
-Lines: 1
-0x6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_classes
-Lines: 1
-Class 3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_speeds
-Lines: 1
-4 Gbit, 8 Gbit, 16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/symbolic_name
-Lines: 1
-Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/board_id
-Lines: 1
-SM_1141000001000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver
-Lines: 1
-2.31.5050
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/hca_type
-Lines: 1
-MT4099
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data
-Lines: 1
-2221223609
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets
-Lines: 1
-87169372
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data
-Lines: 1
-26509113295
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets
-Lines: 1
-85734114
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait
-Lines: 1
-3599
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data
-Lines: 1
-2460436784
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets
-Lines: 1
-89332064
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data
-Lines: 1
-26540356890
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets
-Lines: 1
-88622850
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait
-Lines: 1
-3846
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net/eth0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_assign_type
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_len
-Lines: 1
-6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/address
-Lines: 1
-01:01:01:01:01:01
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/broadcast
-Lines: 1
-ff:ff:ff:ff:ff:ff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_changes
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_down_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_up_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dev_id
-Lines: 1
-0x20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/device
-SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dormant
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/duplex
-Lines: 1
-full
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/flags
-Lines: 1
-0x1303
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifalias
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifindex
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/iflink
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/link_mode
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/mtu
-Lines: 1
-1500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/name_assign_type
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/netdev_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/operstate
-Lines: 1
-up
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_name
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_switch_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/speed
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/tx_queue_len
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/type
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/AC
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/BAT0
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/name
-Lines: 1
-package-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw
-Lines: 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us
-Lines: 1
-976
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj
-Lines: 1
-118821284256
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/name
-Lines: 1
-core
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:a
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/name
-Lines: 1
-package-10
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/cur_state
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/max_state
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/type
-Lines: 1
-Processor
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/cur_state
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/max_state
-Lines: 1
-27
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/type
-Lines: 1
-intel_powerclamp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/temp
-Lines: 1
-49925
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/type
-Lines: 1
-bcm2835_thermal
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/mode
-Lines: 1
-enabled
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/passive
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/temp
-Lines: 1
--44000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/type
-Lines: 1
-acpitz
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device
-SymlinkTo: ../../../ACPI0003:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup
-Lines: 1
-enabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms
-Lines: 1
-10598
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type
-Lines: 1
-Mains
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent
-Lines: 2
-POWER_SUPPLY_NAME=AC
-POWER_SUPPLY_ONLINE=0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm
-Lines: 1
-2369000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity
-Lines: 1
-98
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level
-Lines: 1
-Normal
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold
-Lines: 1
-95
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device
-SymlinkTo: ../../../PNP0C0A:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full
-Lines: 1
-50060000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design
-Lines: 1
-47520000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now
-Lines: 1
-49450000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer
-Lines: 1
-LGC
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name
-Lines: 1
-LNV-45N1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now
-Lines: 1
-4830000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number
-Lines: 1
-38109
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status
-Lines: 1
-Discharging
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology
-Lines: 1
-Li-ion
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type
-Lines: 1
-Battery
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent
-Lines: 16
-POWER_SUPPLY_NAME=BAT0
-POWER_SUPPLY_STATUS=Discharging
-POWER_SUPPLY_PRESENT=1
-POWER_SUPPLY_TECHNOLOGY=Li-ion
-POWER_SUPPLY_CYCLE_COUNT=0
-POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000
-POWER_SUPPLY_VOLTAGE_NOW=11750000
-POWER_SUPPLY_POWER_NOW=5064000
-POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000
-POWER_SUPPLY_ENERGY_FULL=47390000
-POWER_SUPPLY_ENERGY_NOW=40730000
-POWER_SUPPLY_CAPACITY=85
-POWER_SUPPLY_CAPACITY_LEVEL=Normal
-POWER_SUPPLY_MODEL_NAME=LNV-45N1
-POWER_SUPPLY_MANUFACTURER=LGC
-POWER_SUPPLY_SERIAL_NUMBER=38109
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design
-Lines: 1
-10800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now
-Lines: 1
-12229000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats
-Lines: 5
-Unused:		99%
-Metadata:	0%
-Average:	10473
-Sectors per Q:	64
-Quantiles:	[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class
-Lines: 1
-0x020000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device
-Lines: 1
-0x15d7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override
-Lines: 1
-(null)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq
-Lines: 1
-140
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias
-Lines: 1
-pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource
-Lines: 13
-0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision
-Lines: 1
-0x21
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device
-Lines: 1
-0x225a
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor
-Lines: 1
-0x17aa
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent
-Lines: 6
-DRIVER=e1000e
-PCI_CLASS=20000
-PCI_ID=8086:15D7
-PCI_SUBSYS_ID=17AA:225A
-PCI_SLOT_NAME=0000:00:1f.6
-MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor
-Lines: 1
-0x8086
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/name
-Lines: 1
-demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/pool
-Lines: 1
-iscsi-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/name
-Lines: 1
-wrong
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/pool
-Lines: 1
-wrong-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node1/vmstat
-Lines: 6
-nr_free_pages 1
-nr_zone_inactive_anon 2
-nr_zone_active_anon 3
-nr_zone_inactive_file 4
-nr_zone_active_file 5
-nr_zone_unevictable 6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node2/vmstat
-Lines: 6
-nr_free_pages 7
-nr_zone_inactive_anon 8
-nr_zone_active_anon 9
-nr_zone_inactive_file 10
-nr_zone_active_file 11
-nr_zone_unevictable 12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource/clocksource0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource
-Lines: 1
-tsc hpet acpi_pm 
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource
-Lines: 1
-tsc
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq
-SymlinkTo: ../cpufreq/policy0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
-Lines: 1
-10084
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings
-Lines: 1
-11
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list
-Lines: 1
-0,4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq
-Lines: 1
-1200195
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency
-Lines: 1
-4294967295
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus
-Lines: 1
-1
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
-Lines: 1
-powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed
-Lines: 1
-<unsupported>
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count
-Lines: 1
-523
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings
-Lines: 1
-22
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list
-Lines: 1
-1,5
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq
-Lines: 1
-2400000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq
-Lines: 1
-800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq
-Lines: 1
-1219917
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor
-Lines: 1
-powersave
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq
-Lines: 1
-2400000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq
-Lines: 1
-800000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed
-Lines: 1
-<unsupported>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug
-Lines: 7
-rate:           1.1M/sec
-dirty:          20.4G
-target:         20.4G
-proportional:   427.5k
-integral:       790.0k
-change:         321.5k/sec
-next io:        17ms
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats
-Lines: 5
-Unused:		99%
-Metadata:	0%
-Average:	10473
-Sectors per Q:	64
-Quantiles:	[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us
-Lines: 1
-1305
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly
-Lines: 1
-131072
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used
-Lines: 1
-1867776
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used
-Lines: 1
-32768
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label
-Lines: 1
-fixture
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid
-Lines: 1
-0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly
-Lines: 1
-262144
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22
-SymlinkTo: ../../../../devices/virtual/block/loop22
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23
-SymlinkTo: ../../../../devices/virtual/block/loop23
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24
-SymlinkTo: ../../../../devices/virtual/block/loop24
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25
-SymlinkTo: ../../../../devices/virtual/block/loop25
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid
-Lines: 1
-7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sda1/stats/stats
-Lines: 1
-extent_alloc 1 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sdb1/stats/stats
-Lines: 1
-extent_alloc 2 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path
-Lines: 1
-/home/iscsi/file_back_1G
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path
-Lines: 1
-/dev/rbd1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path
-Lines: 1
-/dev/rbd/iscsi-images/demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d
-SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-204950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-40325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026
-SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-104950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-20095
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-71235
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686
-SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-301950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-30195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893
-SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-1234
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-1504
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-4733
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 0102ab0..9bdaccc 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -20,11 +20,18 @@
 // FS represents the pseudo-filesystem sys, which provides an interface to
 // kernel data structures.
 type FS struct {
-	proc fs.FS
+	proc   fs.FS
+	isReal bool
 }
 
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+	// DefaultMountPoint is the common mount point of the proc filesystem.
+	DefaultMountPoint = fs.DefaultProcMountPoint
+
+	// SectorSize represents the size of a sector in bytes.
+	// It is specific to Linux block I/O operations.
+	SectorSize = 512
+)
 
 // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
 // It will error if the mount point directory can't be read or is a file.
@@ -39,5 +46,11 @@
 	if err != nil {
 		return FS{}, err
 	}
-	return FS{fs}, nil
+
+	isReal, err := isRealProc(mountPoint)
+	if err != nil {
+		return FS{}, err
+	}
+
+	return FS{fs, isReal}, nil
 }
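
Editorial note (not part of the patch): the fs.go hunk above threads an isReal flag through FS construction and exports a SectorSize constant. As a hedged illustration of how a consumer of this vendored package might use the exported API, the sketch below calls NewDefaultFS and then LoadAvg, which is chosen arbitrarily as a small reader; the exact behavior depends on the vendored procfs version.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS mounts at procfs.DefaultMountPoint ("/proc") and, per this
	// patch, also records whether the mount is a real proc filesystem.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatalf("cannot open /proc: %v", err)
	}

	// Any reader on FS works the same way; /proc/loadavg is used here only
	// as a small, self-contained example.
	avg, err := fs.LoadAvg()
	if err != nil {
		log.Fatalf("cannot read loadavg: %v", err)
	}
	fmt.Printf("load1=%.2f load5=%.2f load15=%.2f\n", avg.Load1, avg.Load5, avg.Load15)

	// SectorSize (512 bytes) converts sector counts reported by block I/O stats.
	const sectors = 8
	fmt.Printf("%d sectors = %d bytes\n", sectors, sectors*procfs.SectorSize)
}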
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 0000000..1b5bdbd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc returns true on operating systems that don't have a Type field
+// in their Statfs_t struct.
+func isRealProc(_ string) (bool, error) {
+	return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 0000000..80df79c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+	"syscall"
+)
+
+// isRealProc determines whether the supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+	stat := syscall.Statfs_t{}
+	err := syscall.Statfs(mountPoint, &stat)
+	if err != nil {
+		return false, err
+	}
+
+	// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+	return stat.Type == 0x9fa0, nil
+}
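
Editorial note (not part of the patch): the same PROC_SUPER_MAGIC check can be exercised on its own. The standalone sketch below mirrors the constant and the Statfs call from the hunk above and carries the same Linux/FreeBSD build constraint; it is illustrative only.

//go:build freebsd || linux

package main

import (
	"fmt"
	"syscall"
)

// procSuperMagic is the f_type value of a genuine procfs mount
// (PROC_SUPER_MAGIC in include/uapi/linux/magic.h).
const procSuperMagic = 0x9fa0

func main() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/proc", &stat); err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Println("real procfs:", stat.Type == procSuperMagic)
}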
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index f8070e6..7db8633 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@
 	ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
 	// Number of release reqs ignored due to in-progress store
 	ReleaseRequestsIgnoredDueToInProgressStore uint64
-	// Number of page stores cancelled due to release req
+	// Number of page stores canceled due to release req
 	PageStoresCancelledByReleaseRequests uint64
 	VmscanWaiting                        uint64
 	// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@
 	OpsRunning uint64
 	// Number of times async ops queued for processing
 	OpsEnqueued uint64
-	// Number of async ops cancelled
+	// Number of async ops canceled
 	OpsCancelled uint64
 	// Number of async ops rejected due to object lookup/create failure
 	OpsRejected uint64
-	// Number of async ops initialised
+	// Number of async ops initialized
 	OpsInitialised uint64
 	// Number of async ops queued for deferred release
 	OpsDeferred uint64
@@ -236,7 +236,7 @@
 
 	m, err := parseFscacheinfo(bytes.NewReader(b))
 	if err != nil {
-		return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
+		return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
 	}
 
 	return *m, nil
@@ -245,7 +245,7 @@
 func setFSCacheFields(fields []string, setFields ...*uint64) error {
 	var err error
 	if len(fields) < len(setFields) {
-		return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
+		return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
 	}
 
 	for i := range setFields {
@@ -263,7 +263,7 @@
 	for s.Scan() {
 		fields := strings.Fields(s.Text())
 		if len(fields) < 2 {
-			return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
+			return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
 		}
 
 		switch fields[0] {
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 0040753..3a43e83 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -26,8 +26,11 @@
 	// DefaultSysMountPoint is the common mount point of the sys filesystem.
 	DefaultSysMountPoint = "/sys"
 
-	// DefaultConfigfsMountPoint is the common mount point of the configfs
+	// DefaultConfigfsMountPoint is the common mount point of the configfs.
 	DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+	// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+	DefaultSelinuxMountPoint = "/sys/fs/selinux"
 )
 
 // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 22cb07a..5a7d2df 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,7 +14,8 @@
 package util
 
 import (
-	"io/ioutil"
+	"errors"
+	"os"
 	"strconv"
 	"strings"
 )
@@ -64,9 +65,24 @@
 	return us, nil
 }
 
+// ParseHexUint64s parses a slice of hexadecimal strings into uint64 values.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+	us := make([]*uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
 // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
 func ReadUintFromFile(path string) (uint64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}
@@ -75,7 +91,7 @@
 
 // ReadIntFromFile reads a file and attempts to parse a int64 from it.
 func ReadIntFromFile(path string) (int64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}
@@ -95,3 +111,16 @@
 	}
 	return &truth
 }
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	hexString := strings.TrimSpace(string(data))
+	if !strings.HasPrefix(hexString, "0x") {
+		return 0, errors.New("invalid format: hex string does not start with '0x'")
+	}
+	return strconv.ParseUint(hexString[2:], 16, 64)
+}
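
Editorial note (not part of the patch): ReadHexFromFile and ParseHexUint64s live under internal/ and cannot be imported from outside this module. The sketch below only replicates their parsing rules, using hypothetical helper names, to show the two input formats involved ("0x"-prefixed file contents versus bare hex fields).

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// parseHex0x mirrors ReadHexFromFile's format check: the value must be
// prefixed with "0x", e.g. the contents of a sysfs attribute such as "0x1f".
func parseHex0x(s string) (uint64, error) {
	s = strings.TrimSpace(s)
	if !strings.HasPrefix(s, "0x") {
		return 0, errors.New("invalid format: hex string does not start with '0x'")
	}
	return strconv.ParseUint(s[2:], 16, 64)
}

func main() {
	v, err := parseHex0x("0x2a\n")
	fmt.Println(v, err) // 42 <nil>

	// ParseHexUint64s, by contrast, takes bare hex fields (no "0x"), as found
	// in /proc/net/stat/nf_conntrack, and returns one *uint64 per field.
	for _, f := range []string{"0000002a", "ff"} {
		u, _ := strconv.ParseUint(f, 16, 64)
		fmt.Println(u) // 42, then 255
	}
}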
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
index 8051161..71b7a70 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -15,17 +15,16 @@
 
 import (
 	"io"
-	"io/ioutil"
 	"os"
 )
 
-// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
 // many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB.  For files larger than this, a scanner
+// Reads a max file size of 1024kB.  For files larger than this, a scanner
 // should be used.
 func ReadFileNoStat(filename string) ([]byte, error) {
-	const maxBufferSize = 1024 * 512
+	const maxBufferSize = 1024 * 1024
 
 	f, err := os.Open(filename)
 	if err != nil {
@@ -34,5 +33,5 @@
 	defer f.Close()
 
 	reader := io.LimitReader(f, maxBufferSize)
-	return ioutil.ReadAll(reader)
+	return io.ReadAll(reader)
 }
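
Editorial note (not part of the patch): a minimal standalone sketch of the same pattern, for illustration only. The read is capped with io.LimitReader instead of trusting the size reported by stat, which /proc and /sys files commonly misreport.

package main

import (
	"fmt"
	"io"
	"os"
)

// readCapped reads at most maxBytes from a file without consulting os.Stat,
// mirroring ReadFileNoStat's approach for /proc and /sys entries.
func readCapped(path string, maxBytes int64) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return io.ReadAll(io.LimitReader(f, maxBytes))
}

func main() {
	b, err := readCapped("/proc/version", 1024*1024) // 1024kB cap, as in the patch
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(string(b))
}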
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index c07de0b..d5404a6 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -11,17 +11,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux,!appengine
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
 
 package util
 
 import (
 	"bytes"
 	"os"
+	"strconv"
+	"strings"
 	"syscall"
 )
 
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
 // https://github.com/prometheus/node_exporter/pull/728/files
 //
 // Note that this function will not read files larger than 128 bytes.
@@ -33,7 +37,7 @@
 	defer f.Close()
 
 	// On some machines, hwmon drivers are broken and return EAGAIN.  This causes
-	// Go's ioutil.ReadFile implementation to poll forever.
+	// Go's os.ReadFile implementation to poll forever.
 	//
 	// Since we either want to read data or bail immediately, do the simplest
 	// possible read using syscall directly.
@@ -46,3 +50,21 @@
 
 	return string(bytes.TrimSpace(b[:n])), nil
 }
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
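
Editorial note (not part of the patch): the single syscall.Read sidesteps the EAGAIN polling that broken hwmon attributes can trigger under os.ReadFile, and the new helpers simply layer strconv on top. The standalone sketch below illustrates the same idea with a hypothetical helper name; it is Linux-oriented and the attribute path is chosen only as an example of a small integer file.

//go:build linux

package main

import (
	"bytes"
	"fmt"
	"os"
	"strconv"
	"syscall"
)

// sysReadUint does one raw read (sysfs values here are well under 128 bytes)
// and parses the trimmed result, mirroring SysReadUintFromFile.
func sysReadUint(path string) (uint64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(string(bytes.TrimSpace(b[:n])), 10, 64)
}

func main() {
	// /sys/class/net/lo/mtu is a convenient small integer attribute.
	v, err := sysReadUint("/sys/class/net/lo/mtu")
	fmt.Println(v, err)
}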
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
index bd55b45..1d86f5e 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -11,7 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux,appengine !linux
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
 
 package util
 
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 89e4477..bc3a20c 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -20,7 +20,6 @@
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"strconv"
@@ -84,7 +83,7 @@
 		stats       IPVSStats
 	)
 
-	statContent, err := ioutil.ReadAll(r)
+	statContent, err := io.ReadAll(r)
 	if err != nil {
 		return IPVSStats{}, err
 	}
@@ -222,15 +221,16 @@
 	case 46:
 		ip = net.ParseIP(s[1:40])
 		if ip == nil {
-			return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+			return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
 		}
 	default:
-		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+		return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
 	}
 
 	portString := s[len(s)-4:]
 	if len(portString) != 4 {
-		return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+		return nil, 0,
+			fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
 	}
 	port, err := strconv.ParseUint(portString, 16, 16)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
index da3a941..db88566 100644
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package procfs
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 0cce190..332e76c 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -21,7 +21,7 @@
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// LoadAvg represents an entry in /proc/loadavg
+// LoadAvg represents an entry in /proc/loadavg.
 type LoadAvg struct {
 	Load1  float64
 	Load5  float64
@@ -44,14 +44,14 @@
 	loads := make([]float64, 3)
 	parts := strings.Fields(string(loadavgBytes))
 	if len(parts) < 3 {
-		return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
+		return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
 	}
 
 	var err error
 	for i, load := range parts[0:3] {
 		loads[i], err = strconv.ParseFloat(load, 64)
 		if err != nil {
-			return nil, fmt.Errorf("could not parse load %q: %w", load, err)
+			return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
 		}
 	}
 	return &LoadAvg{
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 4c4493b..67a9d2b 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -15,16 +15,19 @@
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"regexp"
 	"strconv"
 	"strings"
 )
 
 var (
-	statusLineRE      = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
-	recoveryLineRE    = regexp.MustCompile(`\((\d+)/\d+\)`)
-	componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
+	statusLineRE         = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
+	recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
+	recoveryLinePctRE    = regexp.MustCompile(`= (.+)%`)
+	recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
+	recoveryLineSpeedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
+	componentDeviceRE    = regexp.MustCompile(`(.*)\[\d+\]`)
 )
 
 // MDStat holds info parsed from /proc/mdstat.
@@ -39,12 +42,22 @@
 	DisksTotal int64
 	// Number of failed disks.
 	DisksFailed int64
+	// Number of "down" disks. (the _ indicator in the status line)
+	DisksDown int64
 	// Spare disks in the device.
 	DisksSpare int64
 	// Number of blocks the device holds.
 	BlocksTotal int64
 	// Number of blocks on the device that are in sync.
 	BlocksSynced int64
+	// Number of blocks on the device that need to be synced.
+	BlocksToBeSynced int64
+	// progress percentage of current sync
+	BlocksSyncedPct float64
+	// estimated finishing time for current sync (in minutes)
+	BlocksSyncedFinishTime float64
+	// current sync speed (in Kilobytes/sec)
+	BlocksSyncedSpeed float64
 	// Name of md component devices
 	Devices []string
 }
@@ -53,13 +66,13 @@
 // structs containing the relevant info.  More information available here:
 // https://raid.wiki.kernel.org/index.php/Mdstat
 func (fs FS) MDStat() ([]MDStat, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+	data, err := os.ReadFile(fs.proc.Path("mdstat"))
 	if err != nil {
 		return nil, err
 	}
 	mdstat, err := parseMDStat(data)
 	if err != nil {
-		return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
+		return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
 	}
 	return mdstat, nil
 }
@@ -79,22 +92,22 @@
 
 		deviceFields := strings.Fields(line)
 		if len(deviceFields) < 3 {
-			return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
+			return nil, fmt.Errorf("%w: Expected 3+ fields, got %q", ErrFileParse, line)
 		}
 		mdName := deviceFields[0] // mdx
 		state := deviceFields[2]  // active or inactive
 
 		if len(lines) <= i+3 {
-			return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
+			return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
 		}
 
 		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
 		fail := int64(strings.Count(line, "(F)"))
 		spare := int64(strings.Count(line, "(S)"))
-		active, total, size, err := evalStatusLine(lines[i], lines[i+1])
+		active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
 
 		if err != nil {
-			return nil, fmt.Errorf("error parsing md device lines: %w", err)
+			return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
 		}
 
 		syncLineIdx := i + 2
@@ -104,7 +117,11 @@
 
 		// If device is syncing at the moment, get the number of currently
 		// synced bytes, otherwise that number equals the size of the device.
-		syncedBlocks := size
+		blocksSynced := size
+		blocksToBeSynced := size
+		speed := float64(0)
+		finish := float64(0)
+		pct := float64(0)
 		recovering := strings.Contains(lines[syncLineIdx], "recovery")
 		resyncing := strings.Contains(lines[syncLineIdx], "resync")
 		checking := strings.Contains(lines[syncLineIdx], "check")
@@ -122,79 +139,125 @@
 			// Handle case when resync=PENDING or resync=DELAYED.
 			if strings.Contains(lines[syncLineIdx], "PENDING") ||
 				strings.Contains(lines[syncLineIdx], "DELAYED") {
-				syncedBlocks = 0
+				blocksSynced = 0
 			} else {
-				syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+				blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
 				if err != nil {
-					return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
+					return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
 				}
 			}
 		}
 
 		mdStats = append(mdStats, MDStat{
-			Name:          mdName,
-			ActivityState: state,
-			DisksActive:   active,
-			DisksFailed:   fail,
-			DisksSpare:    spare,
-			DisksTotal:    total,
-			BlocksTotal:   size,
-			BlocksSynced:  syncedBlocks,
-			Devices:       evalComponentDevices(deviceFields),
+			Name:                   mdName,
+			ActivityState:          state,
+			DisksActive:            active,
+			DisksFailed:            fail,
+			DisksDown:              down,
+			DisksSpare:             spare,
+			DisksTotal:             total,
+			BlocksTotal:            size,
+			BlocksSynced:           blocksSynced,
+			BlocksToBeSynced:       blocksToBeSynced,
+			BlocksSyncedPct:        pct,
+			BlocksSyncedFinishTime: finish,
+			BlocksSyncedSpeed:      speed,
+			Devices:                evalComponentDevices(deviceFields),
 		})
 	}
 
 	return mdStats, nil
 }
 
-func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
+func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+	statusFields := strings.Fields(statusLine)
+	if len(statusFields) < 1 {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+	}
 
-	sizeStr := strings.Fields(statusLine)[0]
+	sizeStr := statusFields[0]
 	size, err = strconv.ParseInt(sizeStr, 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
 	}
 
 	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
 		// In the device deviceLine, only disks have a number associated with them in [].
 		total = int64(strings.Count(deviceLine, "["))
-		return total, total, size, nil
+		return total, total, 0, size, nil
 	}
 
 	if strings.Contains(deviceLine, "inactive") {
-		return 0, 0, size, nil
+		return 0, 0, 0, size, nil
 	}
 
 	matches := statusLineRE.FindStringSubmatch(statusLine)
-	if len(matches) != 4 {
-		return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
+	if len(matches) != 5 {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err)
 	}
 
 	total, err = strconv.ParseInt(matches[2], 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
 	}
 
 	active, err = strconv.ParseInt(matches[3], 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
 	}
+	down = int64(strings.Count(matches[4], "_"))
 
-	return active, total, size, nil
+	return active, total, down, size, nil
 }
 
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
-	matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
+	matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
 	if len(matches) != 2 {
-		return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
+		return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
 	}
 
-	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+	blocks := strings.Split(matches[1], "/")
+	blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
 	if err != nil {
-		return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
+		return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
 	}
 
-	return syncedBlocks, nil
+	blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+	if err != nil {
+		return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, blocks[1], err)
+	}
+
+	// Get percentage complete
+	matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+	}
+	pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+	}
+
+	// Get time expected left to complete
+	matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+	}
+	finish, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+	}
+
+	// Get recovery speed
+	matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
+	}
+	speed, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+	}
+
+	return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
 }
 
 func evalComponentDevices(deviceFields []string) []string {
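
Editorial note (not part of the patch): as an illustration of the four new recovery regexes, the sketch below applies the exact patterns from this hunk to a made-up /proc/mdstat progress line; the sample line and expected outputs are assumptions for demonstration only.

package main

import (
	"fmt"
	"regexp"
)

var (
	blocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
	pctRE    = regexp.MustCompile(`= (.+)%`)
	finishRE = regexp.MustCompile(`finish=(.+)min`)
	speedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
)

func main() {
	// Sample sync-progress line, as rendered by the md driver.
	line := "  [===>.................]  recovery = 16.3% (33325568/204736512) finish=14.4min speed=197472K/sec"

	fmt.Println("blocks:", blocksRE.FindStringSubmatch(line)[1]) // 33325568/204736512
	fmt.Println("pct:   ", pctRE.FindStringSubmatch(line)[1])    // 16.3
	fmt.Println("finish:", finishRE.FindStringSubmatch(line)[1]) // 14.4
	fmt.Println("speed: ", speedRE.FindStringSubmatch(line)[1])  // 197472
}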
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index f65e174..4b2c405 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -126,6 +126,7 @@
 	VmallocUsed *uint64
 	// largest contiguous block of vmalloc area which is free
 	VmallocChunk      *uint64
+	Percpu            *uint64
 	HardwareCorrupted *uint64
 	AnonHugePages     *uint64
 	ShmemHugePages    *uint64
@@ -140,6 +141,55 @@
 	DirectMap4k       *uint64
 	DirectMap2M       *uint64
 	DirectMap1G       *uint64
+
+	// The struct fields below are the byte-normalized counterparts to the
+	// existing struct fields. Values are normalized using the optional
+	// unit field in the meminfo line.
+	MemTotalBytes          *uint64
+	MemFreeBytes           *uint64
+	MemAvailableBytes      *uint64
+	BuffersBytes           *uint64
+	CachedBytes            *uint64
+	SwapCachedBytes        *uint64
+	ActiveBytes            *uint64
+	InactiveBytes          *uint64
+	ActiveAnonBytes        *uint64
+	InactiveAnonBytes      *uint64
+	ActiveFileBytes        *uint64
+	InactiveFileBytes      *uint64
+	UnevictableBytes       *uint64
+	MlockedBytes           *uint64
+	SwapTotalBytes         *uint64
+	SwapFreeBytes          *uint64
+	DirtyBytes             *uint64
+	WritebackBytes         *uint64
+	AnonPagesBytes         *uint64
+	MappedBytes            *uint64
+	ShmemBytes             *uint64
+	SlabBytes              *uint64
+	SReclaimableBytes      *uint64
+	SUnreclaimBytes        *uint64
+	KernelStackBytes       *uint64
+	PageTablesBytes        *uint64
+	NFSUnstableBytes       *uint64
+	BounceBytes            *uint64
+	WritebackTmpBytes      *uint64
+	CommitLimitBytes       *uint64
+	CommittedASBytes       *uint64
+	VmallocTotalBytes      *uint64
+	VmallocUsedBytes       *uint64
+	VmallocChunkBytes      *uint64
+	PercpuBytes            *uint64
+	HardwareCorruptedBytes *uint64
+	AnonHugePagesBytes     *uint64
+	ShmemHugePagesBytes    *uint64
+	ShmemPmdMappedBytes    *uint64
+	CmaTotalBytes          *uint64
+	CmaFreeBytes           *uint64
+	HugepagesizeBytes      *uint64
+	DirectMap4kBytes       *uint64
+	DirectMap2MBytes       *uint64
+	DirectMap1GBytes       *uint64
 }
 
 // Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@
 
 	m, err := parseMemInfo(bytes.NewReader(b))
 	if err != nil {
-		return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
+		return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
 	}
 
 	return *m, nil
@@ -162,114 +212,176 @@
 	var m Meminfo
 	s := bufio.NewScanner(r)
 	for s.Scan() {
-		// Each line has at least a name and value; we ignore the unit.
 		fields := strings.Fields(s.Text())
-		if len(fields) < 2 {
-			return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
-		}
+		var val, valBytes uint64
 
-		v, err := strconv.ParseUint(fields[1], 0, 64)
+		val, err := strconv.ParseUint(fields[1], 0, 64)
 		if err != nil {
 			return nil, err
 		}
 
+		switch len(fields) {
+		case 2:
+			// No unit present, use the parsed value as bytes directly.
+			valBytes = val
+		case 3:
+			// Unit present in optional 3rd field, convert it to
+			// bytes. The only unit supported within the Linux
+			// kernel is `kB`.
+			if fields[2] != "kB" {
+				return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+			}
+
+			valBytes = 1024 * val
+
+		default:
+			return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+		}
+
 		switch fields[0] {
 		case "MemTotal:":
-			m.MemTotal = &v
+			m.MemTotal = &val
+			m.MemTotalBytes = &valBytes
 		case "MemFree:":
-			m.MemFree = &v
+			m.MemFree = &val
+			m.MemFreeBytes = &valBytes
 		case "MemAvailable:":
-			m.MemAvailable = &v
+			m.MemAvailable = &val
+			m.MemAvailableBytes = &valBytes
 		case "Buffers:":
-			m.Buffers = &v
+			m.Buffers = &val
+			m.BuffersBytes = &valBytes
 		case "Cached:":
-			m.Cached = &v
+			m.Cached = &val
+			m.CachedBytes = &valBytes
 		case "SwapCached:":
-			m.SwapCached = &v
+			m.SwapCached = &val
+			m.SwapCachedBytes = &valBytes
 		case "Active:":
-			m.Active = &v
+			m.Active = &val
+			m.ActiveBytes = &valBytes
 		case "Inactive:":
-			m.Inactive = &v
+			m.Inactive = &val
+			m.InactiveBytes = &valBytes
 		case "Active(anon):":
-			m.ActiveAnon = &v
+			m.ActiveAnon = &val
+			m.ActiveAnonBytes = &valBytes
 		case "Inactive(anon):":
-			m.InactiveAnon = &v
+			m.InactiveAnon = &val
+			m.InactiveAnonBytes = &valBytes
 		case "Active(file):":
-			m.ActiveFile = &v
+			m.ActiveFile = &val
+			m.ActiveFileBytes = &valBytes
 		case "Inactive(file):":
-			m.InactiveFile = &v
+			m.InactiveFile = &val
+			m.InactiveFileBytes = &valBytes
 		case "Unevictable:":
-			m.Unevictable = &v
+			m.Unevictable = &val
+			m.UnevictableBytes = &valBytes
 		case "Mlocked:":
-			m.Mlocked = &v
+			m.Mlocked = &val
+			m.MlockedBytes = &valBytes
 		case "SwapTotal:":
-			m.SwapTotal = &v
+			m.SwapTotal = &val
+			m.SwapTotalBytes = &valBytes
 		case "SwapFree:":
-			m.SwapFree = &v
+			m.SwapFree = &val
+			m.SwapFreeBytes = &valBytes
 		case "Dirty:":
-			m.Dirty = &v
+			m.Dirty = &val
+			m.DirtyBytes = &valBytes
 		case "Writeback:":
-			m.Writeback = &v
+			m.Writeback = &val
+			m.WritebackBytes = &valBytes
 		case "AnonPages:":
-			m.AnonPages = &v
+			m.AnonPages = &val
+			m.AnonPagesBytes = &valBytes
 		case "Mapped:":
-			m.Mapped = &v
+			m.Mapped = &val
+			m.MappedBytes = &valBytes
 		case "Shmem:":
-			m.Shmem = &v
+			m.Shmem = &val
+			m.ShmemBytes = &valBytes
 		case "Slab:":
-			m.Slab = &v
+			m.Slab = &val
+			m.SlabBytes = &valBytes
 		case "SReclaimable:":
-			m.SReclaimable = &v
+			m.SReclaimable = &val
+			m.SReclaimableBytes = &valBytes
 		case "SUnreclaim:":
-			m.SUnreclaim = &v
+			m.SUnreclaim = &val
+			m.SUnreclaimBytes = &valBytes
 		case "KernelStack:":
-			m.KernelStack = &v
+			m.KernelStack = &val
+			m.KernelStackBytes = &valBytes
 		case "PageTables:":
-			m.PageTables = &v
+			m.PageTables = &val
+			m.PageTablesBytes = &valBytes
 		case "NFS_Unstable:":
-			m.NFSUnstable = &v
+			m.NFSUnstable = &val
+			m.NFSUnstableBytes = &valBytes
 		case "Bounce:":
-			m.Bounce = &v
+			m.Bounce = &val
+			m.BounceBytes = &valBytes
 		case "WritebackTmp:":
-			m.WritebackTmp = &v
+			m.WritebackTmp = &val
+			m.WritebackTmpBytes = &valBytes
 		case "CommitLimit:":
-			m.CommitLimit = &v
+			m.CommitLimit = &val
+			m.CommitLimitBytes = &valBytes
 		case "Committed_AS:":
-			m.CommittedAS = &v
+			m.CommittedAS = &val
+			m.CommittedASBytes = &valBytes
 		case "VmallocTotal:":
-			m.VmallocTotal = &v
+			m.VmallocTotal = &val
+			m.VmallocTotalBytes = &valBytes
 		case "VmallocUsed:":
-			m.VmallocUsed = &v
+			m.VmallocUsed = &val
+			m.VmallocUsedBytes = &valBytes
 		case "VmallocChunk:":
-			m.VmallocChunk = &v
+			m.VmallocChunk = &val
+			m.VmallocChunkBytes = &valBytes
+		case "Percpu:":
+			m.Percpu = &val
+			m.PercpuBytes = &valBytes
 		case "HardwareCorrupted:":
-			m.HardwareCorrupted = &v
+			m.HardwareCorrupted = &val
+			m.HardwareCorruptedBytes = &valBytes
 		case "AnonHugePages:":
-			m.AnonHugePages = &v
+			m.AnonHugePages = &val
+			m.AnonHugePagesBytes = &valBytes
 		case "ShmemHugePages:":
-			m.ShmemHugePages = &v
+			m.ShmemHugePages = &val
+			m.ShmemHugePagesBytes = &valBytes
 		case "ShmemPmdMapped:":
-			m.ShmemPmdMapped = &v
+			m.ShmemPmdMapped = &val
+			m.ShmemPmdMappedBytes = &valBytes
 		case "CmaTotal:":
-			m.CmaTotal = &v
+			m.CmaTotal = &val
+			m.CmaTotalBytes = &valBytes
 		case "CmaFree:":
-			m.CmaFree = &v
+			m.CmaFree = &val
+			m.CmaFreeBytes = &valBytes
 		case "HugePages_Total:":
-			m.HugePagesTotal = &v
+			m.HugePagesTotal = &val
 		case "HugePages_Free:":
-			m.HugePagesFree = &v
+			m.HugePagesFree = &val
 		case "HugePages_Rsvd:":
-			m.HugePagesRsvd = &v
+			m.HugePagesRsvd = &val
 		case "HugePages_Surp:":
-			m.HugePagesSurp = &v
+			m.HugePagesSurp = &val
 		case "Hugepagesize:":
-			m.Hugepagesize = &v
+			m.Hugepagesize = &val
+			m.HugepagesizeBytes = &valBytes
 		case "DirectMap4k:":
-			m.DirectMap4k = &v
+			m.DirectMap4k = &val
+			m.DirectMap4kBytes = &valBytes
 		case "DirectMap2M:":
-			m.DirectMap2M = &v
+			m.DirectMap2M = &val
+			m.DirectMap2MBytes = &valBytes
 		case "DirectMap1G:":
-			m.DirectMap1G = &v
+			m.DirectMap1G = &val
+			m.DirectMap1GBytes = &valBytes
 		}
 	}
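
Editorial note (not part of the patch): with the unit-aware parsing above, each "Name: value kB" line now fills both the original kibibyte field and its new *Bytes counterpart. A hedged usage sketch against the exported Meminfo API follows; the nil checks are there because field presence depends on the running kernel.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}

	// MemTotal keeps the raw value from /proc/meminfo (kB on Linux);
	// MemTotalBytes is the normalized counterpart added in this patch.
	if mi.MemTotal != nil && mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: %d kB (%d bytes)\n", *mi.MemTotal, *mi.MemTotalBytes)
	}
	if mi.PercpuBytes != nil {
		fmt.Printf("Percpu:   %d bytes\n", *mi.PercpuBytes)
	}
}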
 
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 59f4d50..a704c5e 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -78,11 +78,11 @@
 	mountInfo := strings.Split(mountString, " ")
 	mountInfoLength := len(mountInfo)
 	if mountInfoLength < 10 {
-		return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
+		return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
 	}
 
 	if mountInfo[mountInfoLength-4] != "-" {
-		return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
+		return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
 	}
 
 	mount := &MountInfo{
@@ -98,18 +98,18 @@
 
 	mount.MountID, err = strconv.Atoi(mountInfo[0])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse mount ID")
+		return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
 	}
 	mount.ParentID, err = strconv.Atoi(mountInfo[1])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse parent ID")
+		return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
 	}
 	// Has optional fields, which is a space separated list of values.
 	// Example: shared:2 master:7
 	if mountInfo[6] != "" {
 		mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
 		}
 	}
 	return mount, nil
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828b..50caa73 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -44,6 +44,14 @@
 
 	fieldTransport11TCPLen = 13
 	fieldTransport11UDPLen = 10
+
+	// Kernel version >= 4.14 MaxLen
+	// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+	fieldTransport11RDMAMaxLen = 28
+
+	// Kernel version <= 4.2 MinLen
+	// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+	fieldTransport11RDMAMinLen = 20
 )
 
 // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -80,7 +88,7 @@
 	// Statistics broken down by filesystem operation.
 	Operations []NFSOperationStats
 	// Statistics about the NFS RPC transport.
-	Transport NFSTransportStats
+	Transport []NFSTransportStats
 }
 
 // mountStats implements MountStats.
@@ -231,6 +239,33 @@
 	// A running counter, incremented on each request as the current size of the
 	// pending queue.
 	CumulativePendingQueue uint64
+
+	// Stats below only available with stat version 1.1.
+	// Transport over RDMA
+
+	// accessed when sending a call
+	ReadChunkCount   uint64
+	WriteChunkCount  uint64
+	ReplyChunkCount  uint64
+	TotalRdmaRequest uint64
+
+	// rarely accessed error counters
+	PullupCopyCount      uint64
+	HardwayRegisterCount uint64
+	FailedMarshalCount   uint64
+	BadReplyCount        uint64
+	MrsRecovered         uint64
+	MrsOrphaned          uint64
+	MrsAllocated         uint64
+	EmptySendctxQ        uint64
+
+	// accessed when receiving a reply
+	TotalRdmaReply    uint64
+	FixupCopyCount    uint64
+	ReplyWaitsForSend uint64
+	LocalInvNeeded    uint64
+	NomsgCallCount    uint64
+	BcallCount        uint64
 }
 
 // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@@ -264,7 +299,7 @@
 		if len(ss) > deviceEntryLen {
 			// Only NFSv3 and v4 are supported for parsing statistics
 			if m.Type != nfs3Type && m.Type != nfs4Type {
-				return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+				return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
 			}
 
 			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@@ -284,10 +319,11 @@
 }
 
 // parseMount parses an entry in /proc/[pid]/mountstats in the format:
-//   device [device] mounted on [mount] with fstype [type]
+//
+//	device [device] mounted on [mount] with fstype [type]
 func parseMount(ss []string) (*Mount, error) {
 	if len(ss) < deviceEntryLen {
-		return nil, fmt.Errorf("invalid device entry: %v", ss)
+		return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
 	}
 
 	// Check for specific words appearing at specific indices to ensure
@@ -305,7 +341,7 @@
 
 	for _, f := range format {
 		if ss[f.i] != f.s {
-			return nil, fmt.Errorf("invalid device entry: %v", ss)
+			return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
 		}
 	}
 
@@ -342,7 +378,7 @@
 		switch ss[0] {
 		case fieldOpts:
 			if len(ss) < 2 {
-				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
 			}
 			if stats.Opts == nil {
 				stats.Opts = map[string]string{}
@@ -357,7 +393,7 @@
 			}
 		case fieldAge:
 			if len(ss) < 2 {
-				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
 			}
 			// Age integer is in seconds
 			d, err := time.ParseDuration(ss[1] + "s")
@@ -368,7 +404,7 @@
 			stats.Age = d
 		case fieldBytes:
 			if len(ss) < 2 {
-				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
 			}
 			bstats, err := parseNFSBytesStats(ss[1:])
 			if err != nil {
@@ -378,7 +414,7 @@
 			stats.Bytes = *bstats
 		case fieldEvents:
 			if len(ss) < 2 {
-				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+				return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
 			}
 			estats, err := parseNFSEventsStats(ss[1:])
 			if err != nil {
@@ -388,7 +424,7 @@
 			stats.Events = *estats
 		case fieldTransport:
 			if len(ss) < 3 {
-				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+				return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
 			}
 
 			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@@ -396,7 +432,7 @@
 				return nil, err
 			}
 
-			stats.Transport = *tstats
+			stats.Transport = append(stats.Transport, *tstats)
 		}
 
 		// When encountering "per-operation statistics", we must break this
@@ -427,7 +463,7 @@
 // integer fields.
 func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
 	if len(ss) != fieldBytesLen {
-		return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+		return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
 	}
 
 	ns := make([]uint64, 0, fieldBytesLen)
@@ -456,7 +492,7 @@
 // integer fields.
 func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
 	if len(ss) != fieldEventsLen {
-		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+		return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
 	}
 
 	ns := make([]uint64, 0, fieldEventsLen)
@@ -520,7 +556,7 @@
 		}
 
 		if len(ss) < minFields {
-			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+			return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
 		}
 
 		// Skip string operation name for integers
@@ -533,7 +569,6 @@
 
 			ns = append(ns, n)
 		}
-
 		opStats := NFSOperationStats{
 			Operation:                           strings.TrimSuffix(ss[0], ":"),
 			Requests:                            ns[0],
@@ -566,30 +601,35 @@
 	switch statVersion {
 	case statVersion10:
 		var expectedLength int
-		if protocol == "tcp" {
+		switch protocol {
+		case "tcp":
 			expectedLength = fieldTransport10TCPLen
-		} else if protocol == "udp" {
+		case "udp":
 			expectedLength = fieldTransport10UDPLen
-		} else {
-			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		default:
+			return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
 		}
 		if len(ss) != expectedLength {
-			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+			return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
 		}
 	case statVersion11:
 		var expectedLength int
-		if protocol == "tcp" {
+		switch protocol {
+		case "tcp":
 			expectedLength = fieldTransport11TCPLen
-		} else if protocol == "udp" {
+		case "udp":
 			expectedLength = fieldTransport11UDPLen
-		} else {
-			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		case "rdma":
+			expectedLength = fieldTransport11RDMAMinLen
+		default:
+			return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
 		}
-		if len(ss) != expectedLength {
-			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+		if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+			(protocol == "rdma" && len(ss) < expectedLength) {
+			return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
 		}
 	default:
-		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+		return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@@ -599,7 +639,9 @@
 	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
 	// only v1.0 stats are present.
 	// See: https://github.com/prometheus/node_exporter/issues/571.
-	ns := make([]uint64, fieldTransport11TCPLen)
+	//
+	// Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+	ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
 	for i, s := range ss {
 		n, err := strconv.ParseUint(s, 10, 64)
 		if err != nil {
@@ -615,11 +657,17 @@
 	// For the udp RPC transport there is no connection count, connect idle time,
 	// or idle time (fields #3, #4, and #5); all other fields are the same. So
 	// we set them to 0 here.
-	if protocol == "udp" {
+	switch protocol {
+	case "udp":
 		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	case "tcp":
+		ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+	case "rdma":
+		ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
 	}
 
 	return &NFSTransportStats{
+		// NFS xprt over tcp or udp
 		Protocol:                 protocol,
 		Port:                     ns[0],
 		Bind:                     ns[1],
@@ -631,8 +679,32 @@
 		BadTransactionIDs:        ns[7],
 		CumulativeActiveRequests: ns[8],
 		CumulativeBacklog:        ns[9],
-		MaximumRPCSlotsUsed:      ns[10],
-		CumulativeSendingQueue:   ns[11],
-		CumulativePendingQueue:   ns[12],
+
+		// NFS xprt over tcp or udp
+		// And statVersion 1.1
+		MaximumRPCSlotsUsed:    ns[10],
+		CumulativeSendingQueue: ns[11],
+		CumulativePendingQueue: ns[12],
+
+		// NFS xprt over rdma
+		// And stat Version 1.1
+		ReadChunkCount:       ns[13],
+		WriteChunkCount:      ns[14],
+		ReplyChunkCount:      ns[15],
+		TotalRdmaRequest:     ns[16],
+		PullupCopyCount:      ns[17],
+		HardwayRegisterCount: ns[18],
+		FailedMarshalCount:   ns[19],
+		BadReplyCount:        ns[20],
+		MrsRecovered:         ns[21],
+		MrsOrphaned:          ns[22],
+		MrsAllocated:         ns[23],
+		EmptySendctxQ:        ns[24],
+		TotalRdmaReply:       ns[25],
+		FixupCopyCount:       ns[26],
+		ReplyWaitsForSend:    ns[27],
+		LocalInvNeeded:       ns[28],
+		NomsgCallCount:       ns[29],
+		BcallCount:           ns[30],
 	}, nil
 }
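
Editorial note (not part of the patch): because Transport changes from a single struct to a slice, callers now iterate over one entry per xprt line (tcp, udp or rdma). The hedged sketch below assumes the exported Proc.MountStats accessor and a *MountStatsNFS type assertion, as used by this package's NFS mount statistics; field names follow the struct shown in this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	proc, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := proc.MountStats()
	if err != nil {
		log.Fatal(err)
	}

	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue // only NFS mounts carry transport statistics
		}
		// Transport is now a slice: one NFSTransportStats per xprt line.
		for _, t := range nfs.Transport {
			fmt.Printf("%s %s: sends=%d recvs=%d\n", m.Mount, t.Protocol, t.Sends, t.Receives)
		}
	}
}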
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 9964a36..316df5f 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -18,19 +18,22 @@
 	"bytes"
 	"fmt"
 	"io"
-	"strconv"
 	"strings"
 
 	"github.com/prometheus/procfs/internal/util"
 )
 
 // A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core
+// and contains netfilter conntrack statistics at one CPU core.
 type ConntrackStatEntry struct {
 	Entries       uint64
+	Searched      uint64
 	Found         uint64
+	New           uint64
 	Invalid       uint64
 	Ignore        uint64
+	Delete        uint64
+	DeleteList    uint64
 	Insert        uint64
 	InsertFailed  uint64
 	Drop          uint64
@@ -38,12 +41,12 @@
 	SearchRestart uint64
 }
 
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
 func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
 	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
 }
 
-// Parses a slice of ConntrackStatEntries from the given filepath
+// Parses a slice of ConntrackStatEntries from the given filepath.
 func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	// This file is small and can be read with one syscall.
 	b, err := util.ReadFileNoStat(path)
@@ -55,13 +58,13 @@
 
 	stat, err := parseConntrackStat(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
 	}
 
 	return stat, nil
 }
 
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
 func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 	var entries []ConntrackStatEntry
 
@@ -79,75 +82,37 @@
 	return entries, nil
 }
 
-// Parses a ConntrackStatEntry from given array of fields
+// Parses a ConntrackStatEntry from given array of fields.
 func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
-	if len(fields) != 17 {
-		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
-	}
-	entry := &ConntrackStatEntry{}
-
-	entries, err := parseConntrackStatField(fields[0])
+	entries, err := util.ParseHexUint64s(fields)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
 	}
-	entry.Entries = entries
-
-	found, err := parseConntrackStatField(fields[2])
-	if err != nil {
-		return nil, err
+	numEntries := len(entries)
+	if numEntries < 16 || numEntries > 17 {
+		return nil,
+			fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
 	}
-	entry.Found = found
 
-	invalid, err := parseConntrackStatField(fields[4])
-	if err != nil {
-		return nil, err
+	stats := &ConntrackStatEntry{
+		Entries:      *entries[0],
+		Searched:     *entries[1],
+		Found:        *entries[2],
+		New:          *entries[3],
+		Invalid:      *entries[4],
+		Ignore:       *entries[5],
+		Delete:       *entries[6],
+		DeleteList:   *entries[7],
+		Insert:       *entries[8],
+		InsertFailed: *entries[9],
+		Drop:         *entries[10],
+		EarlyDrop:    *entries[11],
 	}
-	entry.Invalid = invalid
 
-	ignore, err := parseConntrackStatField(fields[5])
-	if err != nil {
-		return nil, err
+	// Ignore missing search_restart on Linux < 2.6.35.
+	if numEntries == 17 {
+		stats.SearchRestart = *entries[16]
 	}
-	entry.Ignore = ignore
 
-	insert, err := parseConntrackStatField(fields[8])
-	if err != nil {
-		return nil, err
-	}
-	entry.Insert = insert
-
-	insertFailed, err := parseConntrackStatField(fields[9])
-	if err != nil {
-		return nil, err
-	}
-	entry.InsertFailed = insertFailed
-
-	drop, err := parseConntrackStatField(fields[10])
-	if err != nil {
-		return nil, err
-	}
-	entry.Drop = drop
-
-	earlyDrop, err := parseConntrackStatField(fields[11])
-	if err != nil {
-		return nil, err
-	}
-	entry.EarlyDrop = earlyDrop
-
-	searchRestart, err := parseConntrackStatField(fields[16])
-	if err != nil {
-		return nil, err
-	}
-	entry.SearchRestart = searchRestart
-
-	return entry, nil
-}
-
-// Parses a uint64 from given hex in string
-func parseConntrackStatField(field string) (uint64, error) {
-	val, err := strconv.ParseUint(field, 16, 64)
-	if err != nil {
-		return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
-	}
-	return val, err
+	return stats, nil
 }
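The rewritten parser above now maps every per-CPU column of net/stat/nf_conntrack via util.ParseHexUint64s and tolerates the missing search_restart column on kernels older than 2.6.35. A minimal consumer sketch, not part of this patch, assuming the vendored import path github.com/prometheus/procfs and a standard /proc mount:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// One ConntrackStatEntry is returned per CPU core.
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d drop=%d\n", cpu, e.Entries, e.Found, e.Drop)
	}
}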
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
index 47a710b..e66208a 100644
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -87,17 +87,17 @@
 // parseLine parses a single line from the /proc/net/dev file. Header lines
 // must be filtered prior to calling this method.
 func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
-	parts := strings.SplitN(rawLine, ":", 2)
-	if len(parts) != 2 {
+	idx := strings.LastIndex(rawLine, ":")
+	if idx == -1 {
 		return nil, errors.New("invalid net/dev line, missing colon")
 	}
-	fields := strings.Fields(strings.TrimSpace(parts[1]))
+	fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
 
 	var err error
 	line := &NetDevLine{}
 
 	// Interface Name
-	line.Name = strings.TrimSpace(parts[0])
+	line.Name = strings.TrimSpace(rawLine[:idx])
 	if line.Name == "" {
 		return nil, errors.New("invalid net/dev line, empty interface name")
 	}
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 0000000..f50b38e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+	netDevSNMP6 := make(NetDevSNMP6)
+
+	// The net/dev_snmp6 folders contain one file per interface
+	ifaceFiles, err := os.ReadDir(dir)
+	if err != nil {
+		// On systems with IPv6 disabled, this directory won't exist;
+		// either way the error is returned to the caller.
+		if errors.Is(err, os.ErrNotExist) {
+			return netDevSNMP6, err
+		}
+		return netDevSNMP6, err
+	}
+
+	for _, iFaceFile := range ifaceFiles {
+		f, err := os.Open(dir + "/" + iFaceFile.Name())
+		if err != nil {
+			return netDevSNMP6, err
+		}
+		defer f.Close()
+
+		netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+		if err != nil {
+			return netDevSNMP6, err
+		}
+	}
+
+	return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+	m := make(map[string]uint64)
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		key, val := stat[0], stat[1]
+
+		// Expect stat name to contain "6" or be "ifIndex"
+		if strings.Contains(key, "6") || key == "ifIndex" {
+			v, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return m, err
+			}
+
+			m[key] = v
+		}
+	}
+	return m, scanner.Err()
+}
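For reference, a hedged usage sketch of the new per-interface IPv6 counters; the stat key Ip6InReceives is only an illustrative example of the names found under /proc/net/dev_snmp6/:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Outer key: interface name; inner key: stat name.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		log.Fatal(err) // the directory is absent on systems with IPv6 disabled
	}
	for iface, kv := range stats {
		fmt.Println(iface, "Ip6InReceives =", kv["Ip6InReceives"])
	}
}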
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index ac01dd8..19e3378 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@
 )
 
 const (
-	// readLimit is used by io.LimitReader while reading the content of the
+	// Maximum size limit used by io.LimitReader while reading the content of the
 	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
 	// as each line represents a single used socket.
 	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -34,7 +34,7 @@
 	readLimit = 4294967296 // Byte -> 4 GiB
 )
 
-// this contains generic data structures for both udp and tcp sockets
+// This contains generic data structures for both udp and tcp sockets.
 type (
 	// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
 	NetIPSocket []*netIPSocketLine
@@ -50,10 +50,13 @@
 		// UsedSockets shows the total number of parsed lines representing the
 		// number of used sockets.
 		UsedSockets uint64
+		// Drops shows the total number of dropped packets of all UDP sockets.
+		Drops *uint64
 	}
 
-	// netIPSocketLine represents the fields parsed from a single line
-	// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+	// A single line parser for fields from /proc/net/{t,u}dp{,6}.
+	// Fields which are not used by IPSocket are skipped.
+	// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
 	// For the proc file format details, see https://linux.die.net/man/5/proc.
 	netIPSocketLine struct {
 		Sl        uint64
@@ -65,6 +68,8 @@
 		TxQueue   uint64
 		RxQueue   uint64
 		UID       uint64
+		Inode     uint64
+		Drops     *uint64
 	}
 )
 
@@ -76,13 +81,14 @@
 	defer f.Close()
 
 	var netIPSocket NetIPSocket
+	isUDP := strings.Contains(file, "udp")
 
 	lr := io.LimitReader(f, readLimit)
 	s := bufio.NewScanner(lr)
 	s.Scan() // skip first line with headers
 	for s.Scan() {
 		fields := strings.Fields(s.Text())
-		line, err := parseNetIPSocketLine(fields)
+		line, err := parseNetIPSocketLine(fields, isUDP)
 		if err != nil {
 			return nil, err
 		}
@@ -103,19 +109,25 @@
 	defer f.Close()
 
 	var netIPSocketSummary NetIPSocketSummary
+	var udpPacketDrops uint64
+	isUDP := strings.Contains(file, "udp")
 
 	lr := io.LimitReader(f, readLimit)
 	s := bufio.NewScanner(lr)
 	s.Scan() // skip first line with headers
 	for s.Scan() {
 		fields := strings.Fields(s.Text())
-		line, err := parseNetIPSocketLine(fields)
+		line, err := parseNetIPSocketLine(fields, isUDP)
 		if err != nil {
 			return nil, err
 		}
 		netIPSocketSummary.TxQueueLength += line.TxQueue
 		netIPSocketSummary.RxQueueLength += line.RxQueue
 		netIPSocketSummary.UsedSockets++
+		if isUDP {
+			udpPacketDrops += *line.Drops
+			netIPSocketSummary.Drops = &udpPacketDrops
+		}
 	}
 	if err := s.Err(); err != nil {
 		return nil, err
@@ -129,7 +141,7 @@
 	var byteIP []byte
 	byteIP, err := hex.DecodeString(hexIP)
 	if err != nil {
-		return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
+		return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
 	}
 	switch len(byteIP) {
 	case 4:
@@ -143,16 +155,17 @@
 		}
 		return i, nil
 	default:
-		return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
+		return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
 	}
 }
 
 // parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
 	line := &netIPSocketLine{}
-	if len(fields) < 8 {
+	if len(fields) < 10 {
 		return nil, fmt.Errorf(
-			"cannot parse net socket line as it has less then 8 columns %q",
+			"%w: Less than 10 columns found %q",
+			ErrFileParse,
 			strings.Join(fields, " "),
 		)
 	}
@@ -161,59 +174,74 @@
 	// sl
 	s := strings.Split(fields[0], ":")
 	if len(s) != 2 {
-		return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
+		return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
 	}
 
 	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
 	}
 	// local_address
 	l := strings.Split(fields[1], ":")
 	if len(l) != 2 {
-		return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
+		return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
 	}
 	if line.LocalAddr, err = parseIP(l[0]); err != nil {
 		return nil, err
 	}
 	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
 	}
 
 	// remote_address
 	r := strings.Split(fields[2], ":")
 	if len(r) != 2 {
-		return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
+		return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
 	}
 	if line.RemAddr, err = parseIP(r[0]); err != nil {
 		return nil, err
 	}
 	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
 	}
 
 	// st
 	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
 	}
 
 	// tx_queue and rx_queue
 	q := strings.Split(fields[4], ":")
 	if len(q) != 2 {
 		return nil, fmt.Errorf(
-			"cannot parse tx/rx queues in socket line as it has a missing colon %q",
+			"%w: Missing colon for tx/rx queues in socket line %q",
+			ErrFileParse,
 			fields[4],
 		)
 	}
 	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
 	}
 	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Cannot parse rx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
 	}
 
 	// uid
 	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
+		return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+	}
+
+	// inode
+	if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+	}
+
+	// drops
+	if isUDP {
+		drops, err := strconv.ParseUint(fields[12], 0, 64)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+		}
+		line.Drops = &drops
 	}
 
 	return line, nil
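The UDP-only Drops pointer added above is surfaced through the UDP wrappers in net_udp.go, which are not part of this hunk; the sketch below assumes the usual fs.NetUDPSummary() helper exists and returns the summary type shown here:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// NetUDPSummary (assumed wrapper) aggregates /proc/net/udp lines.
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("used sockets:", sum.UsedSockets)
	// Drops is only populated for UDP files; it stays nil for TCP.
	if sum.Drops != nil {
		fmt.Println("dropped packets:", *sum.Drops)
	}
}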
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index 8c6de37..8d4b1ac 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -23,7 +23,7 @@
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// NetProtocolStats stores the contents from /proc/net/protocols
+// NetProtocolStats stores the contents from /proc/net/protocols.
 type NetProtocolStats map[string]NetProtocolStatLine
 
 // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
@@ -41,7 +41,7 @@
 	Capabilities NetProtocolCapabilities
 }
 
-// NetProtocolCapabilities contains a list of capabilities for each protocol
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
 type NetProtocolCapabilities struct {
 	Close               bool // 8
 	Connect             bool // 9
@@ -115,23 +115,25 @@
 	if err != nil {
 		return nil, err
 	}
-	if fields[4] == enabled {
+	switch fields[4] {
+	case enabled:
 		line.Pressure = 1
-	} else if fields[4] == disabled {
+	case disabled:
 		line.Pressure = 0
-	} else {
+	default:
 		line.Pressure = -1
 	}
 	line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
 	if err != nil {
 		return nil, err
 	}
-	if fields[6] == enabled {
+	switch fields[6] {
+	case enabled:
 		line.Slab = true
-	} else if fields[6] == disabled {
+	case disabled:
 		line.Slab = false
-	} else {
-		return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
+	default:
+		return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
 	}
 	line.ModuleName = fields[7]
 
@@ -168,12 +170,13 @@
 	}
 
 	for i := 0; i < len(capabilities); i++ {
-		if capabilities[i] == "y" {
+		switch capabilities[i] {
+		case "y":
 			*capabilityFields[i] = true
-		} else if capabilities[i] == "n" {
+		case "n":
 			*capabilityFields[i] = false
-		} else {
-			return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
+		default:
+			return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
 		}
 	}
 	return nil
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 0000000..deb7029
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+const (
+	blackholeRepresentation string = "*"
+	blackholeIfaceName      string = "blackhole"
+	routeLineColumns        int    = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+	Iface       string
+	Destination uint32
+	Gateway     uint32
+	Flags       uint32
+	RefCnt      uint32
+	Use         uint32
+	Metric      uint32
+	Mask        uint32
+	MTU         uint32
+	Window      uint32
+	IRTT        uint32
+}
+
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+	return readNetRoute(fs.proc.Path("net", "route"))
+}
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	routelines, err := parseNetRoute(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+	}
+	return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+	var routelines []NetRouteLine
+
+	scanner := bufio.NewScanner(r)
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		routeline, err := parseNetRouteLine(fields)
+		if err != nil {
+			return nil, err
+		}
+		routelines = append(routelines, *routeline)
+	}
+	return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+	if len(fields) != routeLineColumns {
+		return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+	}
+	iface := fields[0]
+	if iface == blackholeRepresentation {
+		iface = blackholeIfaceName
+	}
+	destination, err := strconv.ParseUint(fields[1], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	gateway, err := strconv.ParseUint(fields[2], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	flags, err := strconv.ParseUint(fields[3], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	use, err := strconv.ParseUint(fields[5], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	metric, err := strconv.ParseUint(fields[6], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	mask, err := strconv.ParseUint(fields[7], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	mtu, err := strconv.ParseUint(fields[8], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	window, err := strconv.ParseUint(fields[9], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	irtt, err := strconv.ParseUint(fields[10], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	routeline := &NetRouteLine{
+		Iface:       iface,
+		Destination: uint32(destination),
+		Gateway:     uint32(gateway),
+		Flags:       uint32(flags),
+		RefCnt:      uint32(refcnt),
+		Use:         uint32(use),
+		Metric:      uint32(metric),
+		Mask:        uint32(mask),
+		MTU:         uint32(mtu),
+		Window:      uint32(window),
+		IRTT:        uint32(irtt),
+	}
+	return routeline, nil
+}
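A short sketch of how the new NetRoute accessor might be consumed; Destination, Gateway and Mask are the raw 32-bit values parsed from the hex columns of /proc/net/route:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range routes {
		// Blackhole routes are reported under the synthetic name "blackhole".
		fmt.Printf("%s dst=%08x gw=%08x metric=%d\n", r.Iface, r.Destination, r.Gateway, r.Metric)
	}
}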
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
index e36f487..fae62b1 100644
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -16,7 +16,6 @@
 import (
 	"bufio"
 	"bytes"
-	"errors"
 	"fmt"
 	"io"
 	"strings"
@@ -70,7 +69,7 @@
 
 	stat, err := parseSockstat(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
+		return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
 	}
 
 	return stat, nil
@@ -84,13 +83,13 @@
 		// Expect a minimum of a protocol and one key/value pair.
 		fields := strings.Split(s.Text(), " ")
 		if len(fields) < 3 {
-			return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
+			return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
 		}
 
 		// The remaining fields are key/value pairs.
 		kvs, err := parseSockstatKVs(fields[1:])
 		if err != nil {
-			return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
+			return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
 		}
 
 		// The first field is the protocol. We must trim its colon suffix.
@@ -119,7 +118,7 @@
 // parseSockstatKVs parses a string slice into a map of key/value pairs.
 func parseSockstatKVs(kvs []string) (map[string]int, error) {
 	if len(kvs)%2 != 0 {
-		return nil, errors.New("odd number of fields in key/value pairs")
+		return nil, fmt.Errorf("%w: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
 	}
 
 	// Iterate two values at a time to gather key/value pairs.
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index 46f12c6..71c8059 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,17 +27,30 @@
 // For the proc file format details,
 // See:
 // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
 
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
 type SoftnetStat struct {
-	// Number of processed packets
+	// Number of processed packets.
 	Processed uint32
-	// Number of dropped packets
+	// Number of dropped packets.
 	Dropped uint32
-	// Number of times processing packets ran out of quota
+	// Number of times processing packets ran out of quota.
 	TimeSqueezed uint32
+	// Number of collisions that occurred while obtaining the device lock during transmission.
+	CPUCollision uint32
+	// Number of times the CPU has been woken up via received_rps.
+	ReceivedRps uint32
+	// Number of times the flow limit has been reached.
+	FlowLimitCount uint32
+	// Softnet backlog status.
+	SoftnetBacklogLen uint32
+	// CPU id owning this softnet_data.
+	Index uint32
+	// Width is the number of columns parsed for this softnet_data line.
+	Width int
 }
 
 var softNetProcFile = "net/softnet_stat"
@@ -51,7 +64,7 @@
 
 	entries, err := parseSoftnet(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
+		return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
 	}
 
 	return entries, nil
@@ -63,25 +76,65 @@
 	s := bufio.NewScanner(r)
 
 	var stats []SoftnetStat
+	cpuIndex := 0
 	for s.Scan() {
 		columns := strings.Fields(s.Text())
 		width := len(columns)
+		softnetStat := SoftnetStat{}
 
 		if width < minColumns {
-			return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
+			return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
 		}
 
-		// We only parse the first three columns at the moment.
-		us, err := parseHexUint32s(columns[0:3])
-		if err != nil {
-			return nil, err
+		// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+		if width >= minColumns {
+			us, err := parseHexUint32s(columns[0:9])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.Processed = us[0]
+			softnetStat.Dropped = us[1]
+			softnetStat.TimeSqueezed = us[2]
+			softnetStat.CPUCollision = us[8]
 		}
 
-		stats = append(stats, SoftnetStat{
-			Processed:    us[0],
-			Dropped:      us[1],
-			TimeSqueezed: us[2],
-		})
+		// Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+		if width >= 10 {
+			us, err := parseHexUint32s(columns[9:10])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.ReceivedRps = us[0]
+		}
+
+		// Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+		if width >= 11 {
+			us, err := parseHexUint32s(columns[10:11])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.FlowLimitCount = us[0]
+		}
+
+		// Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+		if width >= 13 {
+			us, err := parseHexUint32s(columns[11:13])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.SoftnetBacklogLen = us[0]
+			softnetStat.Index = us[1]
+		} else {
+			// For older kernels, create the Index based on the scan line number.
+			softnetStat.Index = uint32(cpuIndex)
+		}
+		softnetStat.Width = width
+		stats = append(stats, softnetStat)
+		cpuIndex++
 	}
 
 	return stats, nil
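The extra softnet columns are only filled on kernels that emit them, which is why Width is recorded per line. The sketch below assumes the public accessor is fs.NetSoftnetStat(), as in upstream procfs; only its body appears in this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		// Width tells callers how many columns this kernel exposed, and hence
		// which of the newer fields carry real data.
		fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d width=%d\n",
			s.Index, s.Processed, s.Dropped, s.TimeSqueezed, s.Width)
	}
}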
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
index 5277629..0396d72 100644
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@
 
 // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
 // read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
 func (fs FS) NetTCP() (NetTCP, error) {
 	return newNetTCP(fs.proc.Path("net/tcp"))
 }
 
 // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
 // read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
 func (fs FS) NetTCP6() (NetTCP, error) {
 	return newNetTCP(fs.proc.Path("net/tcp6"))
 }
 
 // NetTCPSummary returns already computed statistics like the total queue lengths
 // for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
 func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
 	return newNetTCPSummary(fs.proc.Path("net/tcp"))
 }
 
 // NetTCP6Summary returns already computed statistics like the total queue lengths
 // for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
 func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
 	return newNetTCPSummary(fs.proc.Path("net/tcp6"))
 }
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 0000000..13994c1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+	// number of TX sessions currently installed where host handles cryptography
+	TLSCurrTxSw int
+	// number of RX sessions currently installed where host handles cryptography
+	TLSCurrRxSw int
+	// number of TX sessions currently installed where NIC handles cryptography
+	TLSCurrTxDevice int
+	// number of RX sessions currently installed where NIC handles cryptography
+	TLSCurrRxDevice int
+	// number of TX sessions opened with host cryptography
+	TLSTxSw int
+	// number of RX sessions opened with host cryptography
+	TLSRxSw int
+	// number of TX sessions opened with NIC cryptography
+	TLSTxDevice int
+	// number of RX sessions opened with NIC cryptography
+	TLSRxDevice int
+	// record decryption failed (e.g. due to incorrect authentication tag)
+	TLSDecryptError int
+	// number of RX resyncs sent to NICs handling cryptography
+	TLSRxDeviceResync int
+	// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+	TLSDecryptRetry int
+	// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+	TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return TLSStat{}, err
+	}
+
+	return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+	file, err := os.Open(fs.proc.Path("net/tls_stat"))
+	if err != nil {
+		return TLSStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		tlsstat = TLSStat{}
+		s       = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return TLSStat{}, err
+		}
+
+		switch name {
+		case "TlsCurrTxSw":
+			tlsstat.TLSCurrTxSw = value
+		case "TlsCurrRxSw":
+			tlsstat.TLSCurrRxSw = value
+		case "TlsCurrTxDevice":
+			tlsstat.TLSCurrTxDevice = value
+		case "TlsCurrRxDevice":
+			tlsstat.TLSCurrRxDevice = value
+		case "TlsTxSw":
+			tlsstat.TLSTxSw = value
+		case "TlsRxSw":
+			tlsstat.TLSRxSw = value
+		case "TlsTxDevice":
+			tlsstat.TLSTxDevice = value
+		case "TlsRxDevice":
+			tlsstat.TLSRxDevice = value
+		case "TlsDecryptError":
+			tlsstat.TLSDecryptError = value
+		case "TlsRxDeviceResync":
+			tlsstat.TLSRxDeviceResync = value
+		case "TlsDecryptRetry":
+			tlsstat.TLSDecryptRetry = value
+		case "TlsRxNoPadViolation":
+			tlsstat.TLSRxNoPadViolation = value
+		}
+
+	}
+
+	return tlsstat, s.Err()
+}
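A hedged usage sketch for the new kTLS counters; /proc/net/tls_stat only exists when the kernel TLS module is loaded, so the open error is expected on many hosts:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	tls, err := fs.NewTLSStat()
	if err != nil {
		log.Fatal(err) // typically means kTLS is not enabled on this kernel
	}
	fmt.Println("current kTLS TX (software):", tls.TLSCurrTxSw)
	fmt.Println("decrypt errors:", tls.TLSDecryptError)
}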
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index 98aa8e1..d7e0cac 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -108,25 +108,25 @@
 		line := s.Text()
 		item, err := nu.parseLine(line, hasInode, minFields)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
+			return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
 		}
 
 		nu.Rows = append(nu.Rows, item)
 	}
 
 	if err := s.Err(); err != nil {
-		return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
+		return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
 	}
 
 	return &nu, nil
 }
 
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
 	fields := strings.Fields(line)
 
 	l := len(fields)
-	if l < min {
-		return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
+	if l < minFields {
+		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
 	}
 
 	// Field offsets are as follows:
@@ -136,29 +136,29 @@
 
 	users, err := u.parseUsers(fields[1])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
+		return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
 	}
 
 	flags, err := u.parseFlags(fields[3])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
+		return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
 	}
 
 	typ, err := u.parseType(fields[4])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
+		return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
 	}
 
 	state, err := u.parseState(fields[5])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
+		return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
 	}
 
 	var inode uint64
 	if hasInode {
 		inode, err = u.parseInode(fields[6])
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
+			return nil, fmt.Errorf("%w: failed to parse inode %q: %w", ErrFileParse, fields[6], err)
 		}
 	}
 
@@ -172,7 +172,7 @@
 	}
 
 	// Path field is optional.
-	if l > min {
+	if l > minFields {
 		// Path occurs at either index 6 or 7 depending on whether inode is
 		// already present.
 		pathIdx := 7
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 0000000..7c597bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+	Name string
+
+	// Status is the current 4-digit hex value status of the interface.
+	Status uint64
+
+	// QualityLink is the link quality.
+	QualityLink int
+
+	// QualityLevel is the signal gain (dBm).
+	QualityLevel int
+
+	// QualityNoise is the signal noise baseline (dBm).
+	QualityNoise int
+
+	// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+	DiscardedNwid int
+
+	// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+	DiscardedCrypt int
+
+	// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+	DiscardedFrag int
+
+	// DiscardedRetry is the number of discarded packets that reached max MAC retries.
+	DiscardedRetry int
+
+	// DiscardedMisc is the number of discarded packets for other reasons.
+	DiscardedMisc int
+
+	// MissedBeacon is the number of missed beacons/superframe.
+	MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+	if err != nil {
+		return nil, err
+	}
+
+	m, err := parseWireless(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+	}
+
+	return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
+face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
+ eth1: 0000    5.  -256.  -10.       0      1      0     3      0        0
+ eth2: 0000    5.  -256.  -20.       0      2      0     4      0        0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+	var (
+		interfaces []*Wireless
+		scanner    = bufio.NewScanner(r)
+	)
+
+	for n := 0; scanner.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line := scanner.Text()
+
+		parts := strings.Split(line, ":")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+		}
+
+		name := strings.TrimSpace(parts[0])
+		stats := strings.Fields(parts[1])
+
+		if len(stats) < 10 {
+			return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+		}
+
+		status, err := strconv.ParseUint(stats[0], 16, 16)
+		if err != nil {
+			return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+		}
+
+		qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+		}
+
+		qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+		}
+
+		qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+		}
+
+		dnwid, err := strconv.Atoi(stats[4])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+		}
+
+		dcrypt, err := strconv.Atoi(stats[5])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+		}
+
+		dfrag, err := strconv.Atoi(stats[6])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+		}
+
+		dretry, err := strconv.Atoi(stats[7])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+		}
+
+		dmisc, err := strconv.Atoi(stats[8])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+		}
+
+		mbeacon, err := strconv.Atoi(stats[9])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+		}
+
+		w := &Wireless{
+			Name:           name,
+			Status:         status,
+			QualityLink:    qlink,
+			QualityLevel:   qlevel,
+			QualityNoise:   qnoise,
+			DiscardedNwid:  dnwid,
+			DiscardedCrypt: dcrypt,
+			DiscardedFrag:  dfrag,
+			DiscardedRetry: dretry,
+			DiscardedMisc:  dmisc,
+			MissedBeacon:   mbeacon,
+		}
+
+		interfaces = append(interfaces, w)
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+	}
+
+	return interfaces, nil
+}
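A small sketch showing how the new Wireless accessor could be consumed; the quality values are reported in the units used by /proc/net/wireless (link is unitless, level and noise are dBm):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%d dBm missed beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.MissedBeacon)
	}
}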
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
similarity index 94%
rename from vendor/github.com/prometheus/procfs/xfrm.go
rename to vendor/github.com/prometheus/procfs/net_xfrm.go
index eed07c7..932ef20 100644
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -79,10 +79,13 @@
 	// Policy is dead
 	XfrmOutPolDead int
 	// Policy Error
-	XfrmOutPolError     int
-	XfrmFwdHdrError     int
+	XfrmOutPolError int
+	// Forward routing of a packet is not allowed
+	XfrmFwdHdrError int
+	// State is invalid, perhaps expired
 	XfrmOutStateInvalid int
-	XfrmAcquireError    int
+	// State hasn't been fully acquired before use
+	XfrmAcquireError int
 }
 
 // NewXfrmStat reads the xfrm_stat statistics.
@@ -112,7 +115,7 @@
 		fields := strings.Fields(s.Text())
 
 		if len(fields) != 2 {
-			return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
+			return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
 		}
 
 		name := fields[0]
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
new file mode 100644
index 0000000..742dff4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// NetStat contains statistics for all the counters from one file.
+type NetStat struct {
+	Stats    map[string][]uint64
+	Filename string
+}
+
+// NetStat retrieves stats from `/proc/net/stat/`.
+func (fs FS) NetStat() ([]NetStat, error) {
+	statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
+	if err != nil {
+		return nil, err
+	}
+
+	var netStatsTotal []NetStat
+
+	for _, filePath := range statFiles {
+		procNetstat, err := parseNetstat(filePath)
+		if err != nil {
+			return nil, err
+		}
+		procNetstat.Filename = filepath.Base(filePath)
+
+		netStatsTotal = append(netStatsTotal, procNetstat)
+	}
+	return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+	netStat := NetStat{
+		Stats: make(map[string][]uint64),
+	}
+	file, err := os.Open(filePath)
+	if err != nil {
+		return netStat, err
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+	scanner.Scan()
+
+	// The first line is always a header listing the stat names.
+	var headers []string
+	headers = append(headers, strings.Fields(scanner.Text())...)
+
+	// Each remaining line holds the per-CPU values for those stats.
+	for scanner.Scan() {
+		for num, counter := range strings.Fields(scanner.Text()) {
+			value, err := strconv.ParseUint(counter, 16, 64)
+			if err != nil {
+				return NetStat{}, err
+			}
+			netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
+		}
+	}
+
+	return netStat, nil
+}
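A usage sketch for the generic /proc/net/stat reader added above; file names such as arp_cache and ndisc_cache are examples of what typically appears under that directory:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// One NetStat per file under /proc/net/stat/.
	files, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range files {
		for name, perCPU := range f.Stats {
			fmt.Printf("%s %s: %v\n", f.Filename, name, perCPU)
		}
	}
}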
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 28f6968..368187f 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -15,13 +15,13 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"strconv"
 	"strings"
 
-	"github.com/prometheus/procfs/internal/fs"
 	"github.com/prometheus/procfs/internal/util"
 )
 
@@ -30,12 +30,18 @@
 	// The process ID.
 	PID int
 
-	fs fs.FS
+	fs FS
 }
 
 // Procs represents a list of Proc structs.
 type Procs []Proc
 
+var (
+	ErrFileParse  = errors.New("error parsing file")
+	ErrFileRead   = errors.New("error reading file")
+	ErrMountPoint = errors.New("error accessing mount point")
+)
+
 func (p Procs) Len() int           { return len(p) }
 func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
 func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@@ -43,7 +49,7 @@
 // Self returns a process for the current process read via /proc/self.
 func Self() (Proc, error) {
 	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
+	if err != nil || errors.Unwrap(err) == ErrMountPoint {
 		return Proc{}, err
 	}
 	return fs.Self()
@@ -73,7 +79,7 @@
 	if err != nil {
 		return Proc{}, err
 	}
-	pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+	pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
 	if err != nil {
 		return Proc{}, err
 	}
@@ -82,7 +88,7 @@
 
 // NewProc returns a process for the given pid.
 //
-// Deprecated: use fs.Proc() instead
+// Deprecated: Use fs.Proc() instead.
 func (fs FS) NewProc(pid int) (Proc, error) {
 	return fs.Proc(pid)
 }
@@ -92,7 +98,7 @@
 	if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
 		return Proc{}, err
 	}
-	return Proc{PID: pid, fs: fs.proc}, nil
+	return Proc{PID: pid, fs: fs}, nil
 }
 
 // AllProcs returns a list of all currently available processes.
@@ -105,7 +111,7 @@
 
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+		return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
 	}
 
 	p := Procs{}
@@ -114,7 +120,7 @@
 		if err != nil {
 			continue
 		}
-		p = append(p, Proc{PID: int(pid), fs: fs.proc})
+		p = append(p, Proc{PID: int(pid), fs: fs})
 	}
 
 	return p, nil
@@ -131,7 +137,7 @@
 		return []string{}, nil
 	}
 
-	return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+	return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
 }
 
 // Wchan returns the wchan (wait channel) of a process.
@@ -142,7 +148,7 @@
 	}
 	defer f.Close()
 
-	data, err := ioutil.ReadAll(f)
+	data, err := io.ReadAll(f)
 	if err != nil {
 		return "", err
 	}
@@ -185,7 +191,7 @@
 	return wd, err
 }
 
-// RootDir returns the absolute path to the process's root directory (as set by chroot)
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
 func (p Proc) RootDir() (string, error) {
 	rdir, err := os.Readlink(p.path("root"))
 	if os.IsNotExist(err) {
@@ -206,7 +212,7 @@
 	for i, n := range names {
 		fd, err := strconv.ParseInt(n, 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
+			return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
 		}
 		fds[i] = uintptr(fd)
 	}
@@ -237,6 +243,19 @@
 // FileDescriptorsLen returns the number of currently open file descriptors of
 // a process.
 func (p Proc) FileDescriptorsLen() (int, error) {
+	// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+	if p.fs.isReal {
+		stat, err := os.Stat(p.path("fd"))
+		if err != nil {
+			return 0, err
+		}
+
+		size := stat.Size()
+		if size > 0 {
+			return int(size), nil
+		}
+	}
+
 	fds, err := p.fileDescriptors()
 	if err != nil {
 		return 0, err
@@ -278,14 +297,14 @@
 
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
 	}
 
 	return names, nil
 }
 
 func (p Proc) path(pa ...string) string {
-	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+	return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
 }
 
 // FileDescriptorsInfo retrieves information about all file descriptors of
@@ -311,7 +330,7 @@
 
 // Schedstat returns task scheduling information for the process.
 func (p Proc) Schedstat() (ProcSchedstat, error) {
-	contents, err := ioutil.ReadFile(p.path("schedstat"))
+	contents, err := os.ReadFile(p.path("schedstat"))
 	if err != nil {
 		return ProcSchedstat{}, err
 	}
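The new sentinel errors make parse and read failures matchable with errors.Is, and FileDescriptorsLen now prefers the /proc/<pid>/fd directory size reported by Linux 6.2+. A hedged sketch combining both:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// Fast path on Linux >= 6.2; falls back to listing /proc/self/fd otherwise.
	n, err := p.FileDescriptorsLen()
	if errors.Is(err, procfs.ErrFileRead) {
		// Errors from the fallback path wrap the new ErrFileRead sentinel.
		log.Fatalf("read failure: %v", err)
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println("open fds:", n)
}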
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index 0094a13..4a64347 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,8 +23,8 @@
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
 // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
 // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
 // this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
@@ -45,13 +45,13 @@
 }
 
 // parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
+// Line format is hierarchyID:[controller1,controller2]:path.
 func parseCgroupString(cgroupStr string) (*Cgroup, error) {
 	var err error
 
 	fields := strings.SplitN(cgroupStr, ":", 3)
 	if len(fields) < 3 {
-		return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
+		return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
 	}
 
 	cgroup := &Cgroup{
@@ -60,7 +60,7 @@
 	}
 	cgroup.HierarchyID, err = strconv.Atoi(fields[0])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse hierarchy ID")
+		return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
 	}
 	if fields[1] != "" {
 		ssNames := strings.Split(fields[1], ",")
@@ -69,7 +69,7 @@
 	return cgroup, nil
 }
 
-// parseCgroups reads each line of the /proc/[pid]/cgroup file
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
 func parseCgroups(data []byte) ([]Cgroup, error) {
 	var cgroups []Cgroup
 	scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -88,9 +88,9 @@
 
 // Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
 // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the len of the returned struct is equal to the number of active hierarchies on this system
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
 func (p Proc) Cgroups() ([]Cgroup, error) {
-	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
+	data, err := util.ReadFileNoStat(p.path("cgroup"))
 	if err != nil {
 		return nil, err
 	}
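Cgroups now resolves the file through p.path(), so it also respects non-default mount points. A usage sketch; the Controllers and Path field names follow the upstream Cgroup struct and are assumed here, only HierarchyID is visible in this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// One Cgroup per hierarchy; on a pure cgroup v2 host this is a single entry.
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}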
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 0000000..5dd4938
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+	// The name of the controller, also known as the subsystem.
+	SubsysName string
+	// The unique ID of the cgroup hierarchy on which this controller is mounted.
+	Hierarchy int
+	// The number of control groups in this hierarchy using this controller.
+	Cgroups int
+	// This field contains the value 1 if this controller is enabled, or 0 if it has been disabled.
+	Enabled int
+}
+
+// parseCgroupSummaryString parses each line of the /proc/cgroups file.
+// Line format is `subsys_name	hierarchy	num_cgroups	enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+	var err error
+
+	fields := strings.Fields(CgroupSummaryStr)
+	// require at least 4 fields
+	if len(fields) < 4 {
+		return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
+	}
+
+	CgroupSummary := &CgroupSummary{
+		SubsysName: fields[0],
+	}
+	CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
+	}
+	CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
+	}
+	CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
+	}
+	return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroups file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+	var CgroupSummarys []CgroupSummary
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		CgroupSummaryString := scanner.Text()
+		// ignore comment lines
+		if strings.HasPrefix(CgroupSummaryString, "#") {
+			continue
+		}
+		CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+		if err != nil {
+			return nil, err
+		}
+		CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+	}
+
+	err := scanner.Err()
+	return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroupSummary(data)
+}
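A usage sketch for the new /proc/cgroups summary accessor:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		// Enabled is 1 when the controller is active, 0 when it has been disabled.
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}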
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
index 6134b35..57a8989 100644
--- a/vendor/github.com/prometheus/procfs/proc_environ.go
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -19,7 +19,7 @@
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// Environ reads process environments from /proc/<pid>/environ
+// Environ reads process environments from `/proc/<pid>/environ`.
 func (p Proc) Environ() ([]string, error) {
 	environments := make([]string, 0)
 
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index cf63227..fa761b3 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -22,11 +22,11 @@
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// Regexp variables
 var (
 	rPos          = regexp.MustCompile(`^pos:\s+(\d+)$`)
 	rFlags        = regexp.MustCompile(`^flags:\s+(\d+)$`)
 	rMntID        = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+	rIno          = regexp.MustCompile(`^ino:\s+(\d+)$`)
 	rInotify      = regexp.MustCompile(`^inotify`)
 	rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
 )
@@ -41,6 +41,8 @@
 	Flags string
 	// Mount point ID
 	MntID string
+	// Inode number
+	Ino string
 	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
 	InotifyInfos []InotifyInfo
 }
@@ -52,7 +54,7 @@
 		return nil, err
 	}
 
-	var text, pos, flags, mntid string
+	var text, pos, flags, mntid, ino string
 	var inotify []InotifyInfo
 
 	scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -64,6 +66,8 @@
 			flags = rFlags.FindStringSubmatch(text)[1]
 		} else if rMntID.MatchString(text) {
 			mntid = rMntID.FindStringSubmatch(text)[1]
+		} else if rIno.MatchString(text) {
+			ino = rIno.FindStringSubmatch(text)[1]
 		} else if rInotify.MatchString(text) {
 			newInotify, err := parseInotifyInfo(text)
 			if err != nil {
@@ -78,6 +82,7 @@
 		Pos:          pos,
 		Flags:        flags,
 		MntID:        mntid,
+		Ino:          ino,
 		InotifyInfos: inotify,
 	}
 
@@ -112,7 +117,7 @@
 		}
 		return i, nil
 	}
-	return nil, fmt.Errorf("invalid inode entry: %q", line)
+	return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
 }
 
 // ProcFDInfos represents a list of ProcFDInfo structs.
@@ -122,7 +127,7 @@
 func (p ProcFDInfos) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
 func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
 
-// InotifyWatchLen returns the total number of inotify watches
+// InotifyWatchLen returns the total number of inotify watches.
 func (p ProcFDInfos) InotifyWatchLen() (int, error) {
 	length := 0
 	for _, f := range p {
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 0000000..86b4b45
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+	// Info is the type of interrupt.
+	Info string
+	// Devices is the name of the device located at this IRQ.
+	Devices string
+	// Values is the number of interrupts per CPU.
+	Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+	data, err := util.ReadFileNoStat(p.path("interrupts"))
+	if err != nil {
+		return nil, err
+	}
+	return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+	var (
+		interrupts = Interrupts{}
+		scanner    = bufio.NewScanner(r)
+	)
+
+	if !scanner.Scan() {
+		return nil, errors.New("interrupts empty")
+	}
+	cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+	for scanner.Scan() {
+		parts := strings.Fields(scanner.Text())
+		if len(parts) == 0 { // skip empty lines
+			continue
+		}
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+		}
+		intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+		if len(parts) == 2 {
+			interrupts[intName] = Interrupt{
+				Info:    "",
+				Devices: "",
+				Values: []string{
+					parts[1],
+				},
+			}
+			continue
+		}
+
+		intr := Interrupt{
+			Values: parts[1 : cpuNum+1],
+		}
+
+		if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
+			intr.Info = parts[cpuNum+1]
+			intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+		} else {
+			intr.Info = strings.Join(parts[cpuNum+1:], " ")
+		}
+		interrupts[intName] = intr
+	}
+
+	return interrupts, scanner.Err()
+}
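
A minimal usage sketch for the new Proc.Interrupts accessor, assuming the package-level procfs.Self helper from earlier releases; it is illustrative and not part of the vendored change:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process; assumed helper from earlier releases
	if err != nil {
		log.Fatal(err)
	}

	// Interrupts parses the interrupts table and keys the result by IRQ
	// number or name.
	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	for irq, intr := range interrupts {
		// Values holds one counter per CPU column in the header line.
		fmt.Printf("IRQ %s (%s %s): %v\n", irq, intr.Info, intr.Devices, intr.Values)
	}
}
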
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index 776f349..d15b66d 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@
 
 	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
 		"read_bytes: %d\nwrite_bytes: %d\n" +
-		"cancelled_write_bytes: %d\n"
+		"cancelled_write_bytes: %d\n" //nolint:misspell
 
 	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
 		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index dd20f19..9530b14 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -79,7 +79,7 @@
 
 // NewLimits returns the current soft limits of the process.
 //
-// Deprecated: use p.Limits() instead
+// Deprecated: Use p.Limits() instead.
 func (p Proc) NewLimits() (ProcLimits, error) {
 	return p.Limits()
 }
@@ -103,7 +103,7 @@
 		//fields := limitsMatch.Split(s.Text(), limitsFields)
 		fields := limitsMatch.FindStringSubmatch(s.Text())
 		if len(fields) != limitsFields {
-			return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
+			return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
 		}
 
 		switch fields[1] {
@@ -154,7 +154,7 @@
 	}
 	i, err := strconv.ParseUint(s, 10, 64)
 	if err != nil {
-		return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
+		return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
 	}
 	return i, nil
 }
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index 1d7772d..7e75c28 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -11,7 +11,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
 // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js
 
 package procfs
 
@@ -25,7 +27,7 @@
 	"golang.org/x/sys/unix"
 )
 
-// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
 type ProcMapPermissions struct {
 	// mapping has the [R]ead flag set
 	Read bool
@@ -39,8 +41,8 @@
 	Private bool
 }
 
-// ProcMap contains the process memory-mappings of the process,
-// read from /proc/[pid]/maps
+// ProcMap contains the process memory-mappings of the process
+// read from `/proc/[pid]/maps`.
 type ProcMap struct {
 	// The start address of current mapping.
 	StartAddr uintptr
@@ -61,17 +63,17 @@
 // parseDevice parses the device token of a line and converts it to a dev_t
 // (mkdev) like structure.
 func parseDevice(s string) (uint64, error) {
-	toks := strings.Split(s, ":")
-	if len(toks) < 2 {
-		return 0, fmt.Errorf("unexpected number of fields")
+	i := strings.Index(s, ":")
+	if i == -1 {
+		return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
 	}
 
-	major, err := strconv.ParseUint(toks[0], 16, 0)
+	major, err := strconv.ParseUint(s[0:i], 16, 0)
 	if err != nil {
 		return 0, err
 	}
 
-	minor, err := strconv.ParseUint(toks[1], 16, 0)
+	minor, err := strconv.ParseUint(s[i+1:], 16, 0)
 	if err != nil {
 		return 0, err
 	}
@@ -79,7 +81,7 @@
 	return unix.Mkdev(uint32(major), uint32(minor)), nil
 }
 
-// parseAddress just converts a hex-string to a uintptr
+// parseAddress converts a hex-string to a uintptr.
 func parseAddress(s string) (uintptr, error) {
 	a, err := strconv.ParseUint(s, 16, 0)
 	if err != nil {
@@ -89,19 +91,19 @@
 	return uintptr(a), nil
 }
 
-// parseAddresses parses the start-end address
+// parseAddresses parses the start-end address.
 func parseAddresses(s string) (uintptr, uintptr, error) {
-	toks := strings.Split(s, "-")
-	if len(toks) < 2 {
-		return 0, 0, fmt.Errorf("invalid address")
+	idx := strings.Index(s, "-")
+	if idx == -1 {
+		return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
 	}
 
-	saddr, err := parseAddress(toks[0])
+	saddr, err := parseAddress(s[0:idx])
 	if err != nil {
 		return 0, 0, err
 	}
 
-	eaddr, err := parseAddress(toks[1])
+	eaddr, err := parseAddress(s[idx+1:])
 	if err != nil {
 		return 0, 0, err
 	}
@@ -112,7 +114,7 @@
 // parsePermissions parses a token and returns any that are set.
 func parsePermissions(s string) (*ProcMapPermissions, error) {
 	if len(s) < 4 {
-		return nil, fmt.Errorf("invalid permissions token")
+		return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
 	}
 
 	perms := ProcMapPermissions{}
@@ -139,7 +141,7 @@
 func parseProcMap(text string) (*ProcMap, error) {
 	fields := strings.Fields(text)
 	if len(fields) < 5 {
-		return nil, fmt.Errorf("truncated procmap entry")
+		return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
 	}
 
 	saddr, eaddr, err := parseAddresses(fields[0])
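
For orientation, a sketch of iterating the mappings produced by the tightened separator-based parsing above, assuming Proc.ProcMaps and the ProcMap.Pathname field behave as in earlier procfs releases:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process; assumed helper from earlier releases
	if err != nil {
		log.Fatal(err)
	}

	// ProcMaps is assumed to return one *ProcMap per line of /proc/<pid>/maps.
	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		// StartAddr and EndAddr come from the "-"-separated address token.
		fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
	}
}
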
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
new file mode 100644
index 0000000..4248c17
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -0,0 +1,443 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcNetstat models the content of /proc/<pid>/net/netstat.
+type ProcNetstat struct {
+	// The process ID.
+	PID int
+	TcpExt
+	IpExt
+}
+
+type TcpExt struct { // nolint:revive
+	SyncookiesSent            *float64
+	SyncookiesRecv            *float64
+	SyncookiesFailed          *float64
+	EmbryonicRsts             *float64
+	PruneCalled               *float64
+	RcvPruned                 *float64
+	OfoPruned                 *float64
+	OutOfWindowIcmps          *float64
+	LockDroppedIcmps          *float64
+	ArpFilter                 *float64
+	TW                        *float64
+	TWRecycled                *float64
+	TWKilled                  *float64
+	PAWSActive                *float64
+	PAWSEstab                 *float64
+	DelayedACKs               *float64
+	DelayedACKLocked          *float64
+	DelayedACKLost            *float64
+	ListenOverflows           *float64
+	ListenDrops               *float64
+	TCPHPHits                 *float64
+	TCPPureAcks               *float64
+	TCPHPAcks                 *float64
+	TCPRenoRecovery           *float64
+	TCPSackRecovery           *float64
+	TCPSACKReneging           *float64
+	TCPSACKReorder            *float64
+	TCPRenoReorder            *float64
+	TCPTSReorder              *float64
+	TCPFullUndo               *float64
+	TCPPartialUndo            *float64
+	TCPDSACKUndo              *float64
+	TCPLossUndo               *float64
+	TCPLostRetransmit         *float64
+	TCPRenoFailures           *float64
+	TCPSackFailures           *float64
+	TCPLossFailures           *float64
+	TCPFastRetrans            *float64
+	TCPSlowStartRetrans       *float64
+	TCPTimeouts               *float64
+	TCPLossProbes             *float64
+	TCPLossProbeRecovery      *float64
+	TCPRenoRecoveryFail       *float64
+	TCPSackRecoveryFail       *float64
+	TCPRcvCollapsed           *float64
+	TCPDSACKOldSent           *float64
+	TCPDSACKOfoSent           *float64
+	TCPDSACKRecv              *float64
+	TCPDSACKOfoRecv           *float64
+	TCPAbortOnData            *float64
+	TCPAbortOnClose           *float64
+	TCPAbortOnMemory          *float64
+	TCPAbortOnTimeout         *float64
+	TCPAbortOnLinger          *float64
+	TCPAbortFailed            *float64
+	TCPMemoryPressures        *float64
+	TCPMemoryPressuresChrono  *float64
+	TCPSACKDiscard            *float64
+	TCPDSACKIgnoredOld        *float64
+	TCPDSACKIgnoredNoUndo     *float64
+	TCPSpuriousRTOs           *float64
+	TCPMD5NotFound            *float64
+	TCPMD5Unexpected          *float64
+	TCPMD5Failure             *float64
+	TCPSackShifted            *float64
+	TCPSackMerged             *float64
+	TCPSackShiftFallback      *float64
+	TCPBacklogDrop            *float64
+	PFMemallocDrop            *float64
+	TCPMinTTLDrop             *float64
+	TCPDeferAcceptDrop        *float64
+	IPReversePathFilter       *float64
+	TCPTimeWaitOverflow       *float64
+	TCPReqQFullDoCookies      *float64
+	TCPReqQFullDrop           *float64
+	TCPRetransFail            *float64
+	TCPRcvCoalesce            *float64
+	TCPRcvQDrop               *float64
+	TCPOFOQueue               *float64
+	TCPOFODrop                *float64
+	TCPOFOMerge               *float64
+	TCPChallengeACK           *float64
+	TCPSYNChallenge           *float64
+	TCPFastOpenActive         *float64
+	TCPFastOpenActiveFail     *float64
+	TCPFastOpenPassive        *float64
+	TCPFastOpenPassiveFail    *float64
+	TCPFastOpenListenOverflow *float64
+	TCPFastOpenCookieReqd     *float64
+	TCPFastOpenBlackhole      *float64
+	TCPSpuriousRtxHostQueues  *float64
+	BusyPollRxPackets         *float64
+	TCPAutoCorking            *float64
+	TCPFromZeroWindowAdv      *float64
+	TCPToZeroWindowAdv        *float64
+	TCPWantZeroWindowAdv      *float64
+	TCPSynRetrans             *float64
+	TCPOrigDataSent           *float64
+	TCPHystartTrainDetect     *float64
+	TCPHystartTrainCwnd       *float64
+	TCPHystartDelayDetect     *float64
+	TCPHystartDelayCwnd       *float64
+	TCPACKSkippedSynRecv      *float64
+	TCPACKSkippedPAWS         *float64
+	TCPACKSkippedSeq          *float64
+	TCPACKSkippedFinWait2     *float64
+	TCPACKSkippedTimeWait     *float64
+	TCPACKSkippedChallenge    *float64
+	TCPWinProbe               *float64
+	TCPKeepAlive              *float64
+	TCPMTUPFail               *float64
+	TCPMTUPSuccess            *float64
+	TCPWqueueTooBig           *float64
+}
+
+type IpExt struct { // nolint:revive
+	InNoRoutes      *float64
+	InTruncatedPkts *float64
+	InMcastPkts     *float64
+	OutMcastPkts    *float64
+	InBcastPkts     *float64
+	OutBcastPkts    *float64
+	InOctets        *float64
+	OutOctets       *float64
+	InMcastOctets   *float64
+	OutMcastOctets  *float64
+	InBcastOctets   *float64
+	OutBcastOctets  *float64
+	InCsumErrors    *float64
+	InNoECTPkts     *float64
+	InECT1Pkts      *float64
+	InECT0Pkts      *float64
+	InCEPkts        *float64
+	ReasmOverlaps   *float64
+}
+
+func (p Proc) Netstat() (ProcNetstat, error) {
+	filename := p.path("net/netstat")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		return ProcNetstat{PID: p.PID}, err
+	}
+	procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
+	procNetstat.PID = p.PID
+	return procNetstat, err
+}
+
+// parseProcNetstat parses the metrics from the /proc/<pid>/net/netstat file
+// and returns a ProcNetstat structure.
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+	var (
+		scanner     = bufio.NewScanner(r)
+		procNetstat = ProcNetstat{}
+	)
+
+	for scanner.Scan() {
+		nameParts := strings.Split(scanner.Text(), " ")
+		scanner.Scan()
+		valueParts := strings.Split(scanner.Text(), " ")
+		// Remove trailing :.
+		protocol := strings.TrimSuffix(nameParts[0], ":")
+		if len(nameParts) != len(valueParts) {
+			return procNetstat, fmt.Errorf("%w: field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
+		}
+		for i := 1; i < len(nameParts); i++ {
+			value, err := strconv.ParseFloat(valueParts[i], 64)
+			if err != nil {
+				return procNetstat, err
+			}
+			key := nameParts[i]
+
+			switch protocol {
+			case "TcpExt":
+				switch key {
+				case "SyncookiesSent":
+					procNetstat.SyncookiesSent = &value
+				case "SyncookiesRecv":
+					procNetstat.SyncookiesRecv = &value
+				case "SyncookiesFailed":
+					procNetstat.SyncookiesFailed = &value
+				case "EmbryonicRsts":
+					procNetstat.EmbryonicRsts = &value
+				case "PruneCalled":
+					procNetstat.PruneCalled = &value
+				case "RcvPruned":
+					procNetstat.RcvPruned = &value
+				case "OfoPruned":
+					procNetstat.OfoPruned = &value
+				case "OutOfWindowIcmps":
+					procNetstat.OutOfWindowIcmps = &value
+				case "LockDroppedIcmps":
+					procNetstat.LockDroppedIcmps = &value
+				case "ArpFilter":
+					procNetstat.ArpFilter = &value
+				case "TW":
+					procNetstat.TW = &value
+				case "TWRecycled":
+					procNetstat.TWRecycled = &value
+				case "TWKilled":
+					procNetstat.TWKilled = &value
+				case "PAWSActive":
+					procNetstat.PAWSActive = &value
+				case "PAWSEstab":
+					procNetstat.PAWSEstab = &value
+				case "DelayedACKs":
+					procNetstat.DelayedACKs = &value
+				case "DelayedACKLocked":
+					procNetstat.DelayedACKLocked = &value
+				case "DelayedACKLost":
+					procNetstat.DelayedACKLost = &value
+				case "ListenOverflows":
+					procNetstat.ListenOverflows = &value
+				case "ListenDrops":
+					procNetstat.ListenDrops = &value
+				case "TCPHPHits":
+					procNetstat.TCPHPHits = &value
+				case "TCPPureAcks":
+					procNetstat.TCPPureAcks = &value
+				case "TCPHPAcks":
+					procNetstat.TCPHPAcks = &value
+				case "TCPRenoRecovery":
+					procNetstat.TCPRenoRecovery = &value
+				case "TCPSackRecovery":
+					procNetstat.TCPSackRecovery = &value
+				case "TCPSACKReneging":
+					procNetstat.TCPSACKReneging = &value
+				case "TCPSACKReorder":
+					procNetstat.TCPSACKReorder = &value
+				case "TCPRenoReorder":
+					procNetstat.TCPRenoReorder = &value
+				case "TCPTSReorder":
+					procNetstat.TCPTSReorder = &value
+				case "TCPFullUndo":
+					procNetstat.TCPFullUndo = &value
+				case "TCPPartialUndo":
+					procNetstat.TCPPartialUndo = &value
+				case "TCPDSACKUndo":
+					procNetstat.TCPDSACKUndo = &value
+				case "TCPLossUndo":
+					procNetstat.TCPLossUndo = &value
+				case "TCPLostRetransmit":
+					procNetstat.TCPLostRetransmit = &value
+				case "TCPRenoFailures":
+					procNetstat.TCPRenoFailures = &value
+				case "TCPSackFailures":
+					procNetstat.TCPSackFailures = &value
+				case "TCPLossFailures":
+					procNetstat.TCPLossFailures = &value
+				case "TCPFastRetrans":
+					procNetstat.TCPFastRetrans = &value
+				case "TCPSlowStartRetrans":
+					procNetstat.TCPSlowStartRetrans = &value
+				case "TCPTimeouts":
+					procNetstat.TCPTimeouts = &value
+				case "TCPLossProbes":
+					procNetstat.TCPLossProbes = &value
+				case "TCPLossProbeRecovery":
+					procNetstat.TCPLossProbeRecovery = &value
+				case "TCPRenoRecoveryFail":
+					procNetstat.TCPRenoRecoveryFail = &value
+				case "TCPSackRecoveryFail":
+					procNetstat.TCPSackRecoveryFail = &value
+				case "TCPRcvCollapsed":
+					procNetstat.TCPRcvCollapsed = &value
+				case "TCPDSACKOldSent":
+					procNetstat.TCPDSACKOldSent = &value
+				case "TCPDSACKOfoSent":
+					procNetstat.TCPDSACKOfoSent = &value
+				case "TCPDSACKRecv":
+					procNetstat.TCPDSACKRecv = &value
+				case "TCPDSACKOfoRecv":
+					procNetstat.TCPDSACKOfoRecv = &value
+				case "TCPAbortOnData":
+					procNetstat.TCPAbortOnData = &value
+				case "TCPAbortOnClose":
+					procNetstat.TCPAbortOnClose = &value
+				case "TCPDeferAcceptDrop":
+					procNetstat.TCPDeferAcceptDrop = &value
+				case "IPReversePathFilter":
+					procNetstat.IPReversePathFilter = &value
+				case "TCPTimeWaitOverflow":
+					procNetstat.TCPTimeWaitOverflow = &value
+				case "TCPReqQFullDoCookies":
+					procNetstat.TCPReqQFullDoCookies = &value
+				case "TCPReqQFullDrop":
+					procNetstat.TCPReqQFullDrop = &value
+				case "TCPRetransFail":
+					procNetstat.TCPRetransFail = &value
+				case "TCPRcvCoalesce":
+					procNetstat.TCPRcvCoalesce = &value
+				case "TCPRcvQDrop":
+					procNetstat.TCPRcvQDrop = &value
+				case "TCPOFOQueue":
+					procNetstat.TCPOFOQueue = &value
+				case "TCPOFODrop":
+					procNetstat.TCPOFODrop = &value
+				case "TCPOFOMerge":
+					procNetstat.TCPOFOMerge = &value
+				case "TCPChallengeACK":
+					procNetstat.TCPChallengeACK = &value
+				case "TCPSYNChallenge":
+					procNetstat.TCPSYNChallenge = &value
+				case "TCPFastOpenActive":
+					procNetstat.TCPFastOpenActive = &value
+				case "TCPFastOpenActiveFail":
+					procNetstat.TCPFastOpenActiveFail = &value
+				case "TCPFastOpenPassive":
+					procNetstat.TCPFastOpenPassive = &value
+				case "TCPFastOpenPassiveFail":
+					procNetstat.TCPFastOpenPassiveFail = &value
+				case "TCPFastOpenListenOverflow":
+					procNetstat.TCPFastOpenListenOverflow = &value
+				case "TCPFastOpenCookieReqd":
+					procNetstat.TCPFastOpenCookieReqd = &value
+				case "TCPFastOpenBlackhole":
+					procNetstat.TCPFastOpenBlackhole = &value
+				case "TCPSpuriousRtxHostQueues":
+					procNetstat.TCPSpuriousRtxHostQueues = &value
+				case "BusyPollRxPackets":
+					procNetstat.BusyPollRxPackets = &value
+				case "TCPAutoCorking":
+					procNetstat.TCPAutoCorking = &value
+				case "TCPFromZeroWindowAdv":
+					procNetstat.TCPFromZeroWindowAdv = &value
+				case "TCPToZeroWindowAdv":
+					procNetstat.TCPToZeroWindowAdv = &value
+				case "TCPWantZeroWindowAdv":
+					procNetstat.TCPWantZeroWindowAdv = &value
+				case "TCPSynRetrans":
+					procNetstat.TCPSynRetrans = &value
+				case "TCPOrigDataSent":
+					procNetstat.TCPOrigDataSent = &value
+				case "TCPHystartTrainDetect":
+					procNetstat.TCPHystartTrainDetect = &value
+				case "TCPHystartTrainCwnd":
+					procNetstat.TCPHystartTrainCwnd = &value
+				case "TCPHystartDelayDetect":
+					procNetstat.TCPHystartDelayDetect = &value
+				case "TCPHystartDelayCwnd":
+					procNetstat.TCPHystartDelayCwnd = &value
+				case "TCPACKSkippedSynRecv":
+					procNetstat.TCPACKSkippedSynRecv = &value
+				case "TCPACKSkippedPAWS":
+					procNetstat.TCPACKSkippedPAWS = &value
+				case "TCPACKSkippedSeq":
+					procNetstat.TCPACKSkippedSeq = &value
+				case "TCPACKSkippedFinWait2":
+					procNetstat.TCPACKSkippedFinWait2 = &value
+				case "TCPACKSkippedTimeWait":
+					procNetstat.TCPACKSkippedTimeWait = &value
+				case "TCPACKSkippedChallenge":
+					procNetstat.TCPACKSkippedChallenge = &value
+				case "TCPWinProbe":
+					procNetstat.TCPWinProbe = &value
+				case "TCPKeepAlive":
+					procNetstat.TCPKeepAlive = &value
+				case "TCPMTUPFail":
+					procNetstat.TCPMTUPFail = &value
+				case "TCPMTUPSuccess":
+					procNetstat.TCPMTUPSuccess = &value
+				case "TCPWqueueTooBig":
+					procNetstat.TCPWqueueTooBig = &value
+				}
+			case "IpExt":
+				switch key {
+				case "InNoRoutes":
+					procNetstat.InNoRoutes = &value
+				case "InTruncatedPkts":
+					procNetstat.InTruncatedPkts = &value
+				case "InMcastPkts":
+					procNetstat.InMcastPkts = &value
+				case "OutMcastPkts":
+					procNetstat.OutMcastPkts = &value
+				case "InBcastPkts":
+					procNetstat.InBcastPkts = &value
+				case "OutBcastPkts":
+					procNetstat.OutBcastPkts = &value
+				case "InOctets":
+					procNetstat.InOctets = &value
+				case "OutOctets":
+					procNetstat.OutOctets = &value
+				case "InMcastOctets":
+					procNetstat.InMcastOctets = &value
+				case "OutMcastOctets":
+					procNetstat.OutMcastOctets = &value
+				case "InBcastOctets":
+					procNetstat.InBcastOctets = &value
+				case "OutBcastOctets":
+					procNetstat.OutBcastOctets = &value
+				case "InCsumErrors":
+					procNetstat.InCsumErrors = &value
+				case "InNoECTPkts":
+					procNetstat.InNoECTPkts = &value
+				case "InECT1Pkts":
+					procNetstat.InECT1Pkts = &value
+				case "InECT0Pkts":
+					procNetstat.InECT0Pkts = &value
+				case "InCEPkts":
+					procNetstat.InCEPkts = &value
+				case "ReasmOverlaps":
+					procNetstat.ReasmOverlaps = &value
+				}
+			}
+		}
+	}
+	return procNetstat, scanner.Err()
+}
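
A hedged usage sketch for the new Proc.Netstat accessor; note that every counter is a *float64 and stays nil when the kernel does not export that field (procfs.Self is assumed from earlier releases):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process; assumed helper from earlier releases
	if err != nil {
		log.Fatal(err)
	}

	ns, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}

	// nil means the kernel did not export that counter in net/netstat.
	if ns.TCPTimeouts != nil {
		fmt.Printf("pid %d TCPTimeouts: %.0f\n", ns.PID, *ns.TCPTimeouts)
	}
	if ns.InOctets != nil {
		fmt.Printf("pid %d IpExt InOctets: %.0f\n", ns.PID, *ns.InOctets)
	}
}
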
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index 391b4cb..0f8f847 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -40,7 +40,7 @@
 
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
+		return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
 	}
 
 	ns := make(Namespaces, len(names))
@@ -52,13 +52,13 @@
 
 		fields := strings.SplitN(target, ":", 2)
 		if len(fields) != 2 {
-			return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
+			return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
 		}
 
 		typ := fields[0]
 		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
+			return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
 		}
 
 		ns[name] = Namespace{typ, uint32(inode)}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index dc6c14f..ccd35f1 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -35,9 +35,10 @@
 
 const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
 
-// PSILine is a single line of values as returned by /proc/pressure/*
-// The Avg entries are averages over n seconds, as a percentage
-// The Total line is in microseconds
+// PSILine is a single line of values as returned by `/proc/pressure/*`.
+//
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
 type PSILine struct {
 	Avg10  float64
 	Avg60  float64
@@ -46,8 +47,9 @@
 }
 
 // PSIStats represent pressure stall information from /proc/pressure/*
-// Some indicates the share of time in which at least some tasks are stalled
-// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
+//
+// "Some" indicates the share of time in which at least some tasks are stalled.
+// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
 type PSIStats struct {
 	Some *PSILine
 	Full *PSILine
@@ -59,14 +61,14 @@
 func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
 	data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
 	if err != nil {
-		return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
+		return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
 	}
 
-	return parsePSIStats(resource, bytes.NewReader(data))
+	return parsePSIStats(bytes.NewReader(data))
 }
 
-// parsePSIStats parses the specified file for pressure stall information
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+// parsePSIStats parses the specified file for pressure stall information.
+func parsePSIStats(r io.Reader) (PSIStats, error) {
 	psiStats := PSIStats{}
 
 	scanner := bufio.NewScanner(r)
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index a576a72..9a297af 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package procfs
@@ -18,7 +19,6 @@
 import (
 	"bufio"
 	"errors"
-	"fmt"
 	"os"
 	"regexp"
 	"strconv"
@@ -28,30 +28,30 @@
 )
 
 var (
-	// match the header line before each mapped zone in /proc/pid/smaps
+	// Match the header line before each mapped zone in `/proc/pid/smaps`.
 	procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
 )
 
 type ProcSMapsRollup struct {
-	// Amount of the mapping that is currently resident in RAM
+	// Amount of the mapping that is currently resident in RAM.
 	Rss uint64
-	// Process's proportional share of this mapping
+	// Process's proportional share of this mapping.
 	Pss uint64
-	// Size in bytes of clean shared pages
+	// Size in bytes of clean shared pages.
 	SharedClean uint64
-	// Size in bytes of dirty shared pages
+	// Size in bytes of dirty shared pages.
 	SharedDirty uint64
-	// Size in bytes of clean private pages
+	// Size in bytes of clean private pages.
 	PrivateClean uint64
-	// Size in bytes of dirty private pages
+	// Size in bytes of dirty private pages.
 	PrivateDirty uint64
-	// Amount of memory currently marked as referenced or accessed
+	// Amount of memory currently marked as referenced or accessed.
 	Referenced uint64
-	// Amount of memory that does not belong to any file
+	// Amount of memory that does not belong to any file.
 	Anonymous uint64
-	// Amount would-be-anonymous memory currently on swap
+	// Amount of would-be-anonymous memory currently on swap.
 	Swap uint64
-	// Process's proportional memory on swap
+	// Process's proportional memory on swap.
 	SwapPss uint64
 }
 
@@ -116,7 +116,6 @@
 func (s *ProcSMapsRollup) parseLine(line string) error {
 	kv := strings.SplitN(line, ":", 2)
 	if len(kv) != 2 {
-		fmt.Println(line)
 		return errors.New("invalid net/dev line, missing colon")
 	}
 
@@ -126,7 +125,7 @@
 	}
 
 	v := strings.TrimSpace(kv[1])
-	v = strings.TrimRight(v, " kB")
+	v = strings.TrimSuffix(v, " kB")
 
 	vKBytes, err := strconv.ParseUint(v, 10, 64)
 	if err != nil {
@@ -134,12 +133,12 @@
 	}
 	vBytes := vKBytes * 1024
 
-	s.addValue(k, v, vKBytes, vBytes)
+	s.addValue(k, vBytes)
 
 	return nil
 }
 
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
 	switch k {
 	case "Rss":
 		s.Rss += vUintBytes
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
new file mode 100644
index 0000000..4bdc90b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -0,0 +1,353 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp models the content of /proc/<pid>/net/snmp.
+type ProcSnmp struct {
+	// The process ID.
+	PID int
+	Ip
+	Icmp
+	IcmpMsg
+	Tcp
+	Udp
+	UdpLite
+}
+
+type Ip struct { // nolint:revive
+	Forwarding      *float64
+	DefaultTTL      *float64
+	InReceives      *float64
+	InHdrErrors     *float64
+	InAddrErrors    *float64
+	ForwDatagrams   *float64
+	InUnknownProtos *float64
+	InDiscards      *float64
+	InDelivers      *float64
+	OutRequests     *float64
+	OutDiscards     *float64
+	OutNoRoutes     *float64
+	ReasmTimeout    *float64
+	ReasmReqds      *float64
+	ReasmOKs        *float64
+	ReasmFails      *float64
+	FragOKs         *float64
+	FragFails       *float64
+	FragCreates     *float64
+}
+
+type Icmp struct { // nolint:revive
+	InMsgs           *float64
+	InErrors         *float64
+	InCsumErrors     *float64
+	InDestUnreachs   *float64
+	InTimeExcds      *float64
+	InParmProbs      *float64
+	InSrcQuenchs     *float64
+	InRedirects      *float64
+	InEchos          *float64
+	InEchoReps       *float64
+	InTimestamps     *float64
+	InTimestampReps  *float64
+	InAddrMasks      *float64
+	InAddrMaskReps   *float64
+	OutMsgs          *float64
+	OutErrors        *float64
+	OutDestUnreachs  *float64
+	OutTimeExcds     *float64
+	OutParmProbs     *float64
+	OutSrcQuenchs    *float64
+	OutRedirects     *float64
+	OutEchos         *float64
+	OutEchoReps      *float64
+	OutTimestamps    *float64
+	OutTimestampReps *float64
+	OutAddrMasks     *float64
+	OutAddrMaskReps  *float64
+}
+
+type IcmpMsg struct {
+	InType3  *float64
+	OutType3 *float64
+}
+
+type Tcp struct { // nolint:revive
+	RtoAlgorithm *float64
+	RtoMin       *float64
+	RtoMax       *float64
+	MaxConn      *float64
+	ActiveOpens  *float64
+	PassiveOpens *float64
+	AttemptFails *float64
+	EstabResets  *float64
+	CurrEstab    *float64
+	InSegs       *float64
+	OutSegs      *float64
+	RetransSegs  *float64
+	InErrs       *float64
+	OutRsts      *float64
+	InCsumErrors *float64
+}
+
+type Udp struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+type UdpLite struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+func (p Proc) Snmp() (ProcSnmp, error) {
+	filename := p.path("net/snmp")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		return ProcSnmp{PID: p.PID}, err
+	}
+	procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
+	procSnmp.PID = p.PID
+	return procSnmp, err
+}
+
+// parseSnmp parses the metrics from the /proc/<pid>/net/snmp file
+// and returns a ProcSnmp structure containing those metrics (e.g. Ip.Forwarding).
+func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
+	var (
+		scanner  = bufio.NewScanner(r)
+		procSnmp = ProcSnmp{}
+	)
+
+	for scanner.Scan() {
+		nameParts := strings.Split(scanner.Text(), " ")
+		scanner.Scan()
+		valueParts := strings.Split(scanner.Text(), " ")
+		// Remove trailing :.
+		protocol := strings.TrimSuffix(nameParts[0], ":")
+		if len(nameParts) != len(valueParts) {
+			return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
+		}
+		for i := 1; i < len(nameParts); i++ {
+			value, err := strconv.ParseFloat(valueParts[i], 64)
+			if err != nil {
+				return procSnmp, err
+			}
+			key := nameParts[i]
+
+			switch protocol {
+			case "Ip":
+				switch key {
+				case "Forwarding":
+					procSnmp.Forwarding = &value
+				case "DefaultTTL":
+					procSnmp.DefaultTTL = &value
+				case "InReceives":
+					procSnmp.InReceives = &value
+				case "InHdrErrors":
+					procSnmp.InHdrErrors = &value
+				case "InAddrErrors":
+					procSnmp.InAddrErrors = &value
+				case "ForwDatagrams":
+					procSnmp.ForwDatagrams = &value
+				case "InUnknownProtos":
+					procSnmp.InUnknownProtos = &value
+				case "InDiscards":
+					procSnmp.InDiscards = &value
+				case "InDelivers":
+					procSnmp.InDelivers = &value
+				case "OutRequests":
+					procSnmp.OutRequests = &value
+				case "OutDiscards":
+					procSnmp.OutDiscards = &value
+				case "OutNoRoutes":
+					procSnmp.OutNoRoutes = &value
+				case "ReasmTimeout":
+					procSnmp.ReasmTimeout = &value
+				case "ReasmReqds":
+					procSnmp.ReasmReqds = &value
+				case "ReasmOKs":
+					procSnmp.ReasmOKs = &value
+				case "ReasmFails":
+					procSnmp.ReasmFails = &value
+				case "FragOKs":
+					procSnmp.FragOKs = &value
+				case "FragFails":
+					procSnmp.FragFails = &value
+				case "FragCreates":
+					procSnmp.FragCreates = &value
+				}
+			case "Icmp":
+				switch key {
+				case "InMsgs":
+					procSnmp.InMsgs = &value
+				case "InErrors":
+					procSnmp.Icmp.InErrors = &value
+				case "InCsumErrors":
+					procSnmp.Icmp.InCsumErrors = &value
+				case "InDestUnreachs":
+					procSnmp.InDestUnreachs = &value
+				case "InTimeExcds":
+					procSnmp.InTimeExcds = &value
+				case "InParmProbs":
+					procSnmp.InParmProbs = &value
+				case "InSrcQuenchs":
+					procSnmp.InSrcQuenchs = &value
+				case "InRedirects":
+					procSnmp.InRedirects = &value
+				case "InEchos":
+					procSnmp.InEchos = &value
+				case "InEchoReps":
+					procSnmp.InEchoReps = &value
+				case "InTimestamps":
+					procSnmp.InTimestamps = &value
+				case "InTimestampReps":
+					procSnmp.InTimestampReps = &value
+				case "InAddrMasks":
+					procSnmp.InAddrMasks = &value
+				case "InAddrMaskReps":
+					procSnmp.InAddrMaskReps = &value
+				case "OutMsgs":
+					procSnmp.OutMsgs = &value
+				case "OutErrors":
+					procSnmp.OutErrors = &value
+				case "OutDestUnreachs":
+					procSnmp.OutDestUnreachs = &value
+				case "OutTimeExcds":
+					procSnmp.OutTimeExcds = &value
+				case "OutParmProbs":
+					procSnmp.OutParmProbs = &value
+				case "OutSrcQuenchs":
+					procSnmp.OutSrcQuenchs = &value
+				case "OutRedirects":
+					procSnmp.OutRedirects = &value
+				case "OutEchos":
+					procSnmp.OutEchos = &value
+				case "OutEchoReps":
+					procSnmp.OutEchoReps = &value
+				case "OutTimestamps":
+					procSnmp.OutTimestamps = &value
+				case "OutTimestampReps":
+					procSnmp.OutTimestampReps = &value
+				case "OutAddrMasks":
+					procSnmp.OutAddrMasks = &value
+				case "OutAddrMaskReps":
+					procSnmp.OutAddrMaskReps = &value
+				}
+			case "IcmpMsg":
+				switch key {
+				case "InType3":
+					procSnmp.InType3 = &value
+				case "OutType3":
+					procSnmp.OutType3 = &value
+				}
+			case "Tcp":
+				switch key {
+				case "RtoAlgorithm":
+					procSnmp.RtoAlgorithm = &value
+				case "RtoMin":
+					procSnmp.RtoMin = &value
+				case "RtoMax":
+					procSnmp.RtoMax = &value
+				case "MaxConn":
+					procSnmp.MaxConn = &value
+				case "ActiveOpens":
+					procSnmp.ActiveOpens = &value
+				case "PassiveOpens":
+					procSnmp.PassiveOpens = &value
+				case "AttemptFails":
+					procSnmp.AttemptFails = &value
+				case "EstabResets":
+					procSnmp.EstabResets = &value
+				case "CurrEstab":
+					procSnmp.CurrEstab = &value
+				case "InSegs":
+					procSnmp.InSegs = &value
+				case "OutSegs":
+					procSnmp.OutSegs = &value
+				case "RetransSegs":
+					procSnmp.RetransSegs = &value
+				case "InErrs":
+					procSnmp.InErrs = &value
+				case "OutRsts":
+					procSnmp.OutRsts = &value
+				case "InCsumErrors":
+					procSnmp.Tcp.InCsumErrors = &value
+				}
+			case "Udp":
+				switch key {
+				case "InDatagrams":
+					procSnmp.Udp.InDatagrams = &value
+				case "NoPorts":
+					procSnmp.Udp.NoPorts = &value
+				case "InErrors":
+					procSnmp.Udp.InErrors = &value
+				case "OutDatagrams":
+					procSnmp.Udp.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp.Udp.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp.Udp.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp.Udp.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp.Udp.IgnoredMulti = &value
+				}
+			case "UdpLite":
+				switch key {
+				case "InDatagrams":
+					procSnmp.UdpLite.InDatagrams = &value
+				case "NoPorts":
+					procSnmp.UdpLite.NoPorts = &value
+				case "InErrors":
+					procSnmp.UdpLite.InErrors = &value
+				case "OutDatagrams":
+					procSnmp.UdpLite.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp.UdpLite.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp.UdpLite.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp.UdpLite.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp.UdpLite.IgnoredMulti = &value
+				}
+			}
+		}
+	}
+	return procSnmp, scanner.Err()
+}
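
A short sketch of consuming the new Proc.Snmp accessor; the embedded per-protocol structs must be addressed by name where field names collide (for example InErrors and InCsumErrors). procfs.Self is assumed from earlier releases:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process; assumed helper from earlier releases
	if err != nil {
		log.Fatal(err)
	}

	snmp, err := p.Snmp()
	if err != nil {
		log.Fatal(err)
	}

	// Counters are *float64; dereference only after a nil check.
	if snmp.Tcp.RetransSegs != nil && snmp.Tcp.OutSegs != nil && *snmp.Tcp.OutSegs > 0 {
		retrans, out := *snmp.Tcp.RetransSegs, *snmp.Tcp.OutSegs
		fmt.Printf("TCP retransmit ratio: %.4f\n", retrans/out)
	}
}
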
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
new file mode 100644
index 0000000..fb7fd39
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
+type ProcSnmp6 struct {
+	// The process ID.
+	PID int
+	Ip6
+	Icmp6
+	Udp6
+	UdpLite6
+}
+
+type Ip6 struct { // nolint:revive
+	InReceives       *float64
+	InHdrErrors      *float64
+	InTooBigErrors   *float64
+	InNoRoutes       *float64
+	InAddrErrors     *float64
+	InUnknownProtos  *float64
+	InTruncatedPkts  *float64
+	InDiscards       *float64
+	InDelivers       *float64
+	OutForwDatagrams *float64
+	OutRequests      *float64
+	OutDiscards      *float64
+	OutNoRoutes      *float64
+	ReasmTimeout     *float64
+	ReasmReqds       *float64
+	ReasmOKs         *float64
+	ReasmFails       *float64
+	FragOKs          *float64
+	FragFails        *float64
+	FragCreates      *float64
+	InMcastPkts      *float64
+	OutMcastPkts     *float64
+	InOctets         *float64
+	OutOctets        *float64
+	InMcastOctets    *float64
+	OutMcastOctets   *float64
+	InBcastOctets    *float64
+	OutBcastOctets   *float64
+	InNoECTPkts      *float64
+	InECT1Pkts       *float64
+	InECT0Pkts       *float64
+	InCEPkts         *float64
+}
+
+type Icmp6 struct {
+	InMsgs                    *float64
+	InErrors                  *float64
+	OutMsgs                   *float64
+	OutErrors                 *float64
+	InCsumErrors              *float64
+	InDestUnreachs            *float64
+	InPktTooBigs              *float64
+	InTimeExcds               *float64
+	InParmProblems            *float64
+	InEchos                   *float64
+	InEchoReplies             *float64
+	InGroupMembQueries        *float64
+	InGroupMembResponses      *float64
+	InGroupMembReductions     *float64
+	InRouterSolicits          *float64
+	InRouterAdvertisements    *float64
+	InNeighborSolicits        *float64
+	InNeighborAdvertisements  *float64
+	InRedirects               *float64
+	InMLDv2Reports            *float64
+	OutDestUnreachs           *float64
+	OutPktTooBigs             *float64
+	OutTimeExcds              *float64
+	OutParmProblems           *float64
+	OutEchos                  *float64
+	OutEchoReplies            *float64
+	OutGroupMembQueries       *float64
+	OutGroupMembResponses     *float64
+	OutGroupMembReductions    *float64
+	OutRouterSolicits         *float64
+	OutRouterAdvertisements   *float64
+	OutNeighborSolicits       *float64
+	OutNeighborAdvertisements *float64
+	OutRedirects              *float64
+	OutMLDv2Reports           *float64
+	InType1                   *float64
+	InType134                 *float64
+	InType135                 *float64
+	InType136                 *float64
+	InType143                 *float64
+	OutType133                *float64
+	OutType135                *float64
+	OutType136                *float64
+	OutType143                *float64
+}
+
+type Udp6 struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+type UdpLite6 struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+}
+
+func (p Proc) Snmp6() (ProcSnmp6, error) {
+	filename := p.path("net/snmp6")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		// On systems with IPv6 disabled, this file won't exist.
+		// Do nothing.
+		if errors.Is(err, os.ErrNotExist) {
+			return ProcSnmp6{PID: p.PID}, nil
+		}
+
+		return ProcSnmp6{PID: p.PID}, err
+	}
+
+	procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
+	procSnmp6.PID = p.PID
+	return procSnmp6, err
+}
+
+// parseSNMP6Stats parses the metrics from the /proc/<pid>/net/snmp6 file
+// and returns a ProcSnmp6 structure containing those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
+	var (
+		scanner   = bufio.NewScanner(r)
+		procSnmp6 = ProcSnmp6{}
+	)
+
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		// Expect to have "6" in metric name, skip line otherwise
+		if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
+			protocol := stat[0][:sixIndex+1]
+			key := stat[0][sixIndex+1:]
+			value, err := strconv.ParseFloat(stat[1], 64)
+			if err != nil {
+				return procSnmp6, err
+			}
+
+			switch protocol {
+			case "Ip6":
+				switch key {
+				case "InReceives":
+					procSnmp6.InReceives = &value
+				case "InHdrErrors":
+					procSnmp6.InHdrErrors = &value
+				case "InTooBigErrors":
+					procSnmp6.InTooBigErrors = &value
+				case "InNoRoutes":
+					procSnmp6.InNoRoutes = &value
+				case "InAddrErrors":
+					procSnmp6.InAddrErrors = &value
+				case "InUnknownProtos":
+					procSnmp6.InUnknownProtos = &value
+				case "InTruncatedPkts":
+					procSnmp6.InTruncatedPkts = &value
+				case "InDiscards":
+					procSnmp6.InDiscards = &value
+				case "InDelivers":
+					procSnmp6.InDelivers = &value
+				case "OutForwDatagrams":
+					procSnmp6.OutForwDatagrams = &value
+				case "OutRequests":
+					procSnmp6.OutRequests = &value
+				case "OutDiscards":
+					procSnmp6.OutDiscards = &value
+				case "OutNoRoutes":
+					procSnmp6.OutNoRoutes = &value
+				case "ReasmTimeout":
+					procSnmp6.ReasmTimeout = &value
+				case "ReasmReqds":
+					procSnmp6.ReasmReqds = &value
+				case "ReasmOKs":
+					procSnmp6.ReasmOKs = &value
+				case "ReasmFails":
+					procSnmp6.ReasmFails = &value
+				case "FragOKs":
+					procSnmp6.FragOKs = &value
+				case "FragFails":
+					procSnmp6.FragFails = &value
+				case "FragCreates":
+					procSnmp6.FragCreates = &value
+				case "InMcastPkts":
+					procSnmp6.InMcastPkts = &value
+				case "OutMcastPkts":
+					procSnmp6.OutMcastPkts = &value
+				case "InOctets":
+					procSnmp6.InOctets = &value
+				case "OutOctets":
+					procSnmp6.OutOctets = &value
+				case "InMcastOctets":
+					procSnmp6.InMcastOctets = &value
+				case "OutMcastOctets":
+					procSnmp6.OutMcastOctets = &value
+				case "InBcastOctets":
+					procSnmp6.InBcastOctets = &value
+				case "OutBcastOctets":
+					procSnmp6.OutBcastOctets = &value
+				case "InNoECTPkts":
+					procSnmp6.InNoECTPkts = &value
+				case "InECT1Pkts":
+					procSnmp6.InECT1Pkts = &value
+				case "InECT0Pkts":
+					procSnmp6.InECT0Pkts = &value
+				case "InCEPkts":
+					procSnmp6.InCEPkts = &value
+
+				}
+			case "Icmp6":
+				switch key {
+				case "InMsgs":
+					procSnmp6.InMsgs = &value
+				case "InErrors":
+					procSnmp6.Icmp6.InErrors = &value
+				case "OutMsgs":
+					procSnmp6.OutMsgs = &value
+				case "OutErrors":
+					procSnmp6.OutErrors = &value
+				case "InCsumErrors":
+					procSnmp6.Icmp6.InCsumErrors = &value
+				case "InDestUnreachs":
+					procSnmp6.InDestUnreachs = &value
+				case "InPktTooBigs":
+					procSnmp6.InPktTooBigs = &value
+				case "InTimeExcds":
+					procSnmp6.InTimeExcds = &value
+				case "InParmProblems":
+					procSnmp6.InParmProblems = &value
+				case "InEchos":
+					procSnmp6.InEchos = &value
+				case "InEchoReplies":
+					procSnmp6.InEchoReplies = &value
+				case "InGroupMembQueries":
+					procSnmp6.InGroupMembQueries = &value
+				case "InGroupMembResponses":
+					procSnmp6.InGroupMembResponses = &value
+				case "InGroupMembReductions":
+					procSnmp6.InGroupMembReductions = &value
+				case "InRouterSolicits":
+					procSnmp6.InRouterSolicits = &value
+				case "InRouterAdvertisements":
+					procSnmp6.InRouterAdvertisements = &value
+				case "InNeighborSolicits":
+					procSnmp6.InNeighborSolicits = &value
+				case "InNeighborAdvertisements":
+					procSnmp6.InNeighborAdvertisements = &value
+				case "InRedirects":
+					procSnmp6.InRedirects = &value
+				case "InMLDv2Reports":
+					procSnmp6.InMLDv2Reports = &value
+				case "OutDestUnreachs":
+					procSnmp6.OutDestUnreachs = &value
+				case "OutPktTooBigs":
+					procSnmp6.OutPktTooBigs = &value
+				case "OutTimeExcds":
+					procSnmp6.OutTimeExcds = &value
+				case "OutParmProblems":
+					procSnmp6.OutParmProblems = &value
+				case "OutEchos":
+					procSnmp6.OutEchos = &value
+				case "OutEchoReplies":
+					procSnmp6.OutEchoReplies = &value
+				case "OutGroupMembQueries":
+					procSnmp6.OutGroupMembQueries = &value
+				case "OutGroupMembResponses":
+					procSnmp6.OutGroupMembResponses = &value
+				case "OutGroupMembReductions":
+					procSnmp6.OutGroupMembReductions = &value
+				case "OutRouterSolicits":
+					procSnmp6.OutRouterSolicits = &value
+				case "OutRouterAdvertisements":
+					procSnmp6.OutRouterAdvertisements = &value
+				case "OutNeighborSolicits":
+					procSnmp6.OutNeighborSolicits = &value
+				case "OutNeighborAdvertisements":
+					procSnmp6.OutNeighborAdvertisements = &value
+				case "OutRedirects":
+					procSnmp6.OutRedirects = &value
+				case "OutMLDv2Reports":
+					procSnmp6.OutMLDv2Reports = &value
+				case "InType1":
+					procSnmp6.InType1 = &value
+				case "InType134":
+					procSnmp6.InType134 = &value
+				case "InType135":
+					procSnmp6.InType135 = &value
+				case "InType136":
+					procSnmp6.InType136 = &value
+				case "InType143":
+					procSnmp6.InType143 = &value
+				case "OutType133":
+					procSnmp6.OutType133 = &value
+				case "OutType135":
+					procSnmp6.OutType135 = &value
+				case "OutType136":
+					procSnmp6.OutType136 = &value
+				case "OutType143":
+					procSnmp6.OutType143 = &value
+				}
+			case "Udp6":
+				switch key {
+				case "InDatagrams":
+					procSnmp6.Udp6.InDatagrams = &value
+				case "NoPorts":
+					procSnmp6.Udp6.NoPorts = &value
+				case "InErrors":
+					procSnmp6.Udp6.InErrors = &value
+				case "OutDatagrams":
+					procSnmp6.Udp6.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp6.Udp6.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp6.Udp6.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp6.Udp6.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp6.IgnoredMulti = &value
+				}
+			case "UdpLite6":
+				switch key {
+				case "InDatagrams":
+					procSnmp6.UdpLite6.InDatagrams = &value
+				case "NoPorts":
+					procSnmp6.UdpLite6.NoPorts = &value
+				case "InErrors":
+					procSnmp6.UdpLite6.InErrors = &value
+				case "OutDatagrams":
+					procSnmp6.UdpLite6.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp6.UdpLite6.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp6.UdpLite6.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp6.UdpLite6.InCsumErrors = &value
+				}
+			}
+		}
+	}
+	return procSnmp6, scanner.Err()
+}
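
A sketch of the new Proc.Snmp6 accessor; as the code above shows, a missing snmp6 file (IPv6 disabled) yields an empty struct and a nil error, so callers should expect nil counters. procfs.Self is assumed from earlier releases:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process; assumed helper from earlier releases
	if err != nil {
		log.Fatal(err)
	}

	snmp6, err := p.Snmp6()
	if err != nil {
		log.Fatal(err)
	}

	// With IPv6 disabled every counter stays nil and no error is returned.
	if snmp6.Ip6.InReceives == nil {
		fmt.Println("no IPv6 statistics available")
		return
	}
	fmt.Printf("Ip6 InReceives: %.0f\n", *snmp6.Ip6.InReceives)
}
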
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 67ca0e9..06a8d93 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -18,7 +18,6 @@
 	"fmt"
 	"os"
 
-	"github.com/prometheus/procfs/internal/fs"
 	"github.com/prometheus/procfs/internal/util"
 )
 
@@ -81,10 +80,10 @@
 	STime uint
 	// Amount of time that this process's waited-for children have been
 	// scheduled in user mode, measured in clock ticks.
-	CUTime uint
+	CUTime int
 	// Amount of time that this process's waited-for children have been
 	// scheduled in kernel mode, measured in clock ticks.
-	CSTime uint
+	CSTime int
 	// For processes running a real-time scheduling policy, this is the negated
 	// scheduling priority, minus one.
 	Priority int
@@ -100,13 +99,29 @@
 	VSize uint
 	// Resident set size in pages.
 	RSS int
+	// Soft limit in bytes on the rss of the process.
+	RSSLimit uint64
+	// CPU number last executed on.
+	Processor uint
+	// Real-time scheduling priority, a number in the range 1 to 99 for processes
+	// scheduled under a real-time policy, or 0, for non-real-time processes.
+	RTPriority uint
+	// Scheduling policy.
+	Policy uint
+	// Aggregated block I/O delays, measured in clock ticks (centiseconds).
+	DelayAcctBlkIOTicks uint64
+	// Guest time of the process (time spent running a virtual CPU for a guest
+	// operating system), measured in clock ticks.
+	GuestTime int
+	// Guest time of the process's children, measured in clock ticks.
+	CGuestTime int
 
-	proc fs.FS
+	proc FS
 }
 
 // NewStat returns the current status information of the process.
 //
-// Deprecated: use p.Stat() instead
+// Deprecated: Use p.Stat() instead.
 func (p Proc) NewStat() (ProcStat, error) {
 	return p.Stat()
 }
@@ -119,7 +134,8 @@
 	}
 
 	var (
-		ignore int
+		ignoreInt64  int64
+		ignoreUint64 uint64
 
 		s = ProcStat{PID: p.PID, proc: p.fs}
 		l = bytes.Index(data, []byte("("))
@@ -127,10 +143,15 @@
 	)
 
 	if l < 0 || r < 0 {
-		return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
+		return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
 	}
 
 	s.Comm = string(data[l+1 : r])
+
+	// Check the following resources for the details about the particular stat
+	// fields and their data types:
+	// * https://man7.org/linux/man-pages/man5/proc.5.html
+	// * https://man7.org/linux/man-pages/man3/scanf.3.html
 	_, err = fmt.Fscan(
 		bytes.NewBuffer(data[r+2:]),
 		&s.State,
@@ -151,10 +172,30 @@
 		&s.Priority,
 		&s.Nice,
 		&s.NumThreads,
-		&ignore,
+		&ignoreInt64,
 		&s.Starttime,
 		&s.VSize,
 		&s.RSS,
+		&s.RSSLimit,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreInt64,
+		&s.Processor,
+		&s.RTPriority,
+		&s.Policy,
+		&s.DelayAcctBlkIOTicks,
+		&s.GuestTime,
+		&s.CGuestTime,
 	)
 	if err != nil {
 		return ProcStat{}, err
@@ -175,8 +216,7 @@
 
 // StartTime returns the unix timestamp of the process in seconds.
 func (s ProcStat) StartTime() (float64, error) {
-	fs := FS{proc: s.proc}
-	stat, err := fs.Stat()
+	stat, err := s.proc.Stat()
 	if err != nil {
 		return 0, err
 	}
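
To illustrate the extended /proc/<pid>/stat parsing above, a sketch that reads a few of the newly exposed ProcStat fields, assuming procfs.Self from earlier releases:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process; assumed helper from earlier releases
	if err != nil {
		log.Fatal(err)
	}

	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Fields newly parsed from the trailing columns of /proc/<pid>/stat.
	fmt.Printf("last CPU: %d, rt prio: %d, policy: %d\n",
		stat.Processor, stat.RTPriority, stat.Policy)
	fmt.Printf("block I/O delay ticks: %d\n", stat.DelayAcctBlkIOTicks)

	// StartTime now resolves boot time through the embedded procfs.FS.
	start, err := stat.StartTime()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("started at unix %.0f\n", start)
}
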
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 6edd833..dd8aa56 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -15,6 +15,8 @@
 
 import (
 	"bytes"
+	"math/bits"
+	"sort"
 	"strconv"
 	"strings"
 
@@ -22,7 +24,7 @@
 )
 
 // ProcStatus provides status information about the process,
-// read from /proc/[pid]/stat.
+// read from /proc/[pid]/status.
 type ProcStatus struct {
 	// The process ID.
 	PID int
@@ -31,39 +33,41 @@
 
 	// Thread group ID.
 	TGID int
+	// The process's PID in each of the PID namespaces of which it is a member.
+	NSpids []uint64
 
 	// Peak virtual memory size.
-	VmPeak uint64 // nolint:golint
+	VmPeak uint64 // nolint:revive
 	// Virtual memory size.
-	VmSize uint64 // nolint:golint
+	VmSize uint64 // nolint:revive
 	// Locked memory size.
-	VmLck uint64 // nolint:golint
+	VmLck uint64 // nolint:revive
 	// Pinned memory size.
-	VmPin uint64 // nolint:golint
+	VmPin uint64 // nolint:revive
 	// Peak resident set size.
-	VmHWM uint64 // nolint:golint
+	VmHWM uint64 // nolint:revive
 	// Resident set size (sum of RssAnon, RssFile and RssShmem).
-	VmRSS uint64 // nolint:golint
+	VmRSS uint64 // nolint:revive
 	// Size of resident anonymous memory.
-	RssAnon uint64 // nolint:golint
+	RssAnon uint64 // nolint:revive
 	// Size of resident file mappings.
-	RssFile uint64 // nolint:golint
+	RssFile uint64 // nolint:revive
 	// Size of resident shared memory.
-	RssShmem uint64 // nolint:golint
+	RssShmem uint64 // nolint:revive
 	// Size of data segments.
-	VmData uint64 // nolint:golint
+	VmData uint64 // nolint:revive
 	// Size of stack segments.
-	VmStk uint64 // nolint:golint
+	VmStk uint64 // nolint:revive
 	// Size of text segments.
-	VmExe uint64 // nolint:golint
+	VmExe uint64 // nolint:revive
 	// Shared library code size.
-	VmLib uint64 // nolint:golint
+	VmLib uint64 // nolint:revive
 	// Page table entries size.
-	VmPTE uint64 // nolint:golint
+	VmPTE uint64 // nolint:revive
 	// Size of second-level page tables.
-	VmPMD uint64 // nolint:golint
+	VmPMD uint64 // nolint:revive
 	// Swapped-out virtual memory size by anonymous private.
-	VmSwap uint64 // nolint:golint
+	VmSwap uint64 // nolint:revive
 	// Size of hugetlb memory portions
 	HugetlbPages uint64
 
@@ -73,9 +77,12 @@
 	NonVoluntaryCtxtSwitches uint64
 
 	// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
-	UIDs [4]string
+	UIDs [4]uint64
 	// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
-	GIDs [4]string
+	GIDs [4]uint64
+
+	// CpusAllowedList: List of CPU cores the process is allowed to run on.
+	CpusAllowedList []uint64
 }
 
 // NewStatus returns the current status information of the process.
@@ -96,10 +103,10 @@
 		kv := strings.SplitN(line, ":", 2)
 
 		// removes spaces
-		k := string(strings.TrimSpace(kv[0]))
-		v := string(strings.TrimSpace(kv[1]))
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
 		// removes "kB"
-		v = string(bytes.Trim([]byte(v), " kB"))
+		v = strings.TrimSuffix(v, " kB")
 
 		// value to int when possible
 		// we can skip error check here, 'cause vKBytes is not used when value is a string
@@ -107,22 +114,43 @@
 		// convert kB to B
 		vBytes := vKBytes * 1024
 
-		s.fillStatus(k, v, vKBytes, vBytes)
+		err = s.fillStatus(k, v, vKBytes, vBytes)
+		if err != nil {
+			return ProcStatus{}, err
+		}
 	}
 
 	return s, nil
 }
 
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
 	switch k {
 	case "Tgid":
 		s.TGID = int(vUint)
 	case "Name":
 		s.Name = vString
 	case "Uid":
-		copy(s.UIDs[:], strings.Split(vString, "\t"))
+		var err error
+		for i, v := range strings.Split(vString, "\t") {
+			s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+			if err != nil {
+				return err
+			}
+		}
 	case "Gid":
-		copy(s.GIDs[:], strings.Split(vString, "\t"))
+		var err error
+		for i, v := range strings.Split(vString, "\t") {
+			s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+			if err != nil {
+				return err
+			}
+		}
+	case "NSpid":
+		nspids, err := calcNSPidsList(vString)
+		if err != nil {
+			return err
+		}
+		s.NSpids = nspids
 	case "VmPeak":
 		s.VmPeak = vUintBytes
 	case "VmSize":
@@ -161,10 +189,54 @@
 		s.VoluntaryCtxtSwitches = vUint
 	case "nonvoluntary_ctxt_switches":
 		s.NonVoluntaryCtxtSwitches = vUint
+	case "Cpus_allowed_list":
+		s.CpusAllowedList = calcCpusAllowedList(vString)
 	}
+
+	return nil
 }
 
 // TotalCtxtSwitches returns the total context switch.
 func (s ProcStatus) TotalCtxtSwitches() uint64 {
 	return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
 }
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+	s := strings.Split(cpuString, ",")
+
+	var g []uint64
+
+	for _, cpu := range s {
+		// parse cpu ranges, example: 1-3=[1,2,3]
+		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+			startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+			endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+			for i := startCPU; i <= endCPU; i++ {
+				g = append(g, i)
+			}
+		} else if len(l) == 1 {
+			cpu, _ := strconv.ParseUint(l[0], 10, 64)
+			g = append(g, cpu)
+		}
+
+	}
+
+	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+	return g
+}
+
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+	s := strings.Split(nspidsString, "\t")
+	var nspids []uint64
+
+	for _, nspid := range s {
+		nspid, err := strconv.ParseUint(nspid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		nspids = append(nspids, nspid)
+	}
+
+	return nspids, nil
+}
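The status parser above now returns numeric UIDs/GIDs and adds the NSpids and CpusAllowedList fields. A minimal sketch of reading them through the public API (assuming the vendored import path github.com/prometheus/procfs; output is host-dependent):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	// UIDs and GIDs are now numeric ([4]uint64) instead of [4]string.
	fmt.Println("real/effective UID:", status.UIDs[0], status.UIDs[1])
	// NSpids lists the process's PID as seen from each nested PID namespace.
	fmt.Println("NSpid:", status.NSpids)
	// CpusAllowedList is the expanded Cpus_allowed_list range, e.g. "0-3" -> [0 1 2 3].
	fmt.Println("cpus allowed:", status.CpusAllowedList)
}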
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 0000000..3810d1a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+	return strings.ReplaceAll(sysctl, ".", "/")
+}
+
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+	value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+	if err != nil {
+		return nil, err
+	}
+	return strings.Fields(value), nil
+
+}
+
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+	fields, err := fs.SysctlStrings(sysctl)
+	if err != nil {
+		return nil, err
+	}
+
+	values := make([]int, len(fields))
+	for i, f := range fields {
+		vp := util.NewValueParser(f)
+		values[i] = vp.Int()
+		if err := vp.Err(); err != nil {
+			return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+		}
+	}
+	return values, nil
+}
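SysctlStrings and SysctlInts read /proc/sys entries by dotted sysctl name, splitting the value into whitespace-separated fields. A short sketch (the sysctl names below are examples and may be absent or unreadable on some hosts):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// "kernel.hostname" maps to /proc/sys/kernel/hostname.
	host, err := fs.SysctlStrings("kernel.hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hostname:", host)

	// "net.core.somaxconn" maps to /proc/sys/net/core/somaxconn.
	somaxconn, err := fs.SysctlInts("net.core.somaxconn")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("somaxconn:", somaxconn)
}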
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
index 2822816..5f7f32d 100644
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -40,7 +40,7 @@
 	CPUs []*SchedstatCPU
 }
 
-// SchedstatCPU contains the values from one "cpu<N>" line
+// SchedstatCPU contains the values from one "cpu<N>" line.
 type SchedstatCPU struct {
 	CPUNum string
 
@@ -49,14 +49,14 @@
 	RunTimeslices      uint64
 }
 
-// ProcSchedstat contains the values from /proc/<pid>/schedstat
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
 type ProcSchedstat struct {
 	RunningNanoseconds uint64
 	WaitingNanoseconds uint64
 	RunTimeslices      uint64
 }
 
-// Schedstat reads data from /proc/schedstat
+// Schedstat reads data from `/proc/schedstat`.
 func (fs FS) Schedstat() (*Schedstat, error) {
 	file, err := os.Open(fs.proc.Path("schedstat"))
 	if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
index 7896fd7..8611c90 100644
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -68,7 +68,7 @@
 	l := slabSpace.ReplaceAllString(line, " ")
 	s := strings.Split(l, " ")
 	if len(s) != 16 {
-		return nil, fmt.Errorf("unable to parse: %q", line)
+		return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
 	}
 	var err error
 	i := &Slab{Name: s[0]}
@@ -137,7 +137,7 @@
 	return s, nil
 }
 
-// SlabInfo reads data from /proc/slabinfo
+// SlabInfo reads data from `/proc/slabinfo`.
 func (fs FS) SlabInfo() (SlabInfo, error) {
 	// TODO: Consider passing options to allow for parsing different
 	// slabinfo versions. However, slabinfo 2.1 has been stable since
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
new file mode 100644
index 0000000..403e6ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Softirqs represents the softirq statistics.
+type Softirqs struct {
+	Hi      []uint64
+	Timer   []uint64
+	NetTx   []uint64
+	NetRx   []uint64
+	Block   []uint64
+	IRQPoll []uint64
+	Tasklet []uint64
+	Sched   []uint64
+	HRTimer []uint64
+	RCU     []uint64
+}
+
+func (fs FS) Softirqs() (Softirqs, error) {
+	fileName := fs.proc.Path("softirqs")
+	data, err := util.ReadFileNoStat(fileName)
+	if err != nil {
+		return Softirqs{}, err
+	}
+
+	reader := bytes.NewReader(data)
+
+	return parseSoftirqs(reader)
+}
+
+func parseSoftirqs(r io.Reader) (Softirqs, error) {
+	var (
+		softirqs = Softirqs{}
+		scanner  = bufio.NewScanner(r)
+	)
+
+	if !scanner.Scan() {
+		return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
+	}
+
+	for scanner.Scan() {
+		parts := strings.Fields(scanner.Text())
+		var err error
+
+		// require at least one cpu
+		if len(parts) < 2 {
+			continue
+		}
+		switch parts[0] {
+		case "HI:":
+			perCPU := parts[1:]
+			softirqs.Hi = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "TIMER:":
+			perCPU := parts[1:]
+			softirqs.Timer = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "NET_TX:":
+			perCPU := parts[1:]
+			softirqs.NetTx = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "NET_RX:":
+			perCPU := parts[1:]
+			softirqs.NetRx = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "BLOCK:":
+			perCPU := parts[1:]
+			softirqs.Block = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "IRQ_POLL:":
+			perCPU := parts[1:]
+			softirqs.IRQPoll = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "TASKLET:":
+			perCPU := parts[1:]
+			softirqs.Tasklet = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "SCHED:":
+			perCPU := parts[1:]
+			softirqs.Sched = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "HRTIMER:":
+			perCPU := parts[1:]
+			softirqs.HRTimer = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "RCU:":
+			perCPU := parts[1:]
+			softirqs.RCU = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
+	}
+
+	return softirqs, scanner.Err()
+}
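The new Softirqs parser returns one counter per CPU for each softirq class, in the column order of /proc/softirqs. A minimal sketch that sums the TIMER counters across CPUs:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	s, err := fs.Softirqs()
	if err != nil {
		log.Fatal(err)
	}
	// Each slice holds one counter per CPU, indexed by column position.
	var timerTotal uint64
	for _, c := range s.Timer {
		timerTotal += c
	}
	fmt.Printf("TIMER softirqs across %d CPUs: %d\n", len(s.Timer), timerTotal)
}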
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 6d87275..e36b41c 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -41,7 +41,7 @@
 
 // SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
 // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading /proc/softirqs
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
 type SoftIRQStat struct {
 	Hi          uint64
 	Timer       uint64
@@ -62,7 +62,7 @@
 	// Summed up cpu statistics.
 	CPUTotal CPUStat
 	// Per-CPU statistics.
-	CPU []CPUStat
+	CPU map[int64]CPUStat
 	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
 	IRQTotal uint64
 	// Number of times a numbered IRQ was triggered.
@@ -93,10 +93,10 @@
 		&cpuStat.Guest, &cpuStat.GuestNice)
 
 	if err != nil && err != io.EOF {
-		return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
 	}
 	if count == 0 {
-		return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
 	}
 
 	cpuStat.User /= userHZ
@@ -116,7 +116,7 @@
 
 	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
 	if err != nil {
-		return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
 	}
 
 	return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@
 		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
 
 	if err != nil {
-		return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
+		return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
 	}
 
 	return softIRQStat, total, nil
@@ -145,7 +145,7 @@
 // NewStat returns information about current cpu/process statistics.
 // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
 //
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
 func NewStat() (Stat, error) {
 	fs, err := NewFS(fs.DefaultProcMountPoint)
 	if err != nil {
@@ -155,25 +155,42 @@
 }
 
 // NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
 //
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
 func (fs FS) NewStat() (Stat, error) {
 	return fs.Stat()
 }
 
 // Stat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
 func (fs FS) Stat() (Stat, error) {
 	fileName := fs.proc.Path("stat")
 	data, err := util.ReadFileNoStat(fileName)
 	if err != nil {
 		return Stat{}, err
 	}
+	procStat, err := parseStat(bytes.NewReader(data), fileName)
+	if err != nil {
+		return Stat{}, err
+	}
+	return procStat, nil
+}
 
-	stat := Stat{}
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+	var (
+		scanner = bufio.NewScanner(r)
+		stat    = Stat{
+			CPU: make(map[int64]CPUStat),
+		}
+		err error
+	)
 
-	scanner := bufio.NewScanner(bytes.NewReader(data))
+	// Increase default scanner buffer to handle very long `intr` lines.
+	buf := make([]byte, 0, 8*1024)
+	scanner.Buffer(buf, 1024*1024)
+
 	for scanner.Scan() {
 		line := scanner.Text()
 		parts := strings.Fields(scanner.Text())
@@ -184,34 +201,34 @@
 		switch {
 		case parts[0] == "btime":
 			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "intr":
 			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
 			}
 			numberedIRQs := parts[2:]
 			stat.IRQ = make([]uint64, len(numberedIRQs))
 			for i, count := range numberedIRQs {
 				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
+					return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "ctxt":
 			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "processes":
 			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "procs_running":
 			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "procs_blocked":
 			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "softirq":
 			softIRQStats, total, err := parseSoftIRQStat(line)
@@ -228,16 +245,13 @@
 			if cpuID == -1 {
 				stat.CPUTotal = cpuStat
 			} else {
-				for int64(len(stat.CPU)) <= cpuID {
-					stat.CPU = append(stat.CPU, CPUStat{})
-				}
 				stat.CPU[cpuID] = cpuStat
 			}
 		}
 	}
 
 	if err := scanner.Err(); err != nil {
-		return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
+		return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
 	}
 
 	return stat, nil
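Note that Stat.CPU is now a map keyed by CPU ID rather than a positional slice, so gaps left by offline CPUs are no longer zero-filled and iteration order is undefined. Callers that indexed the old slice can sort the keys instead, roughly as in this sketch (import path as assumed above):

package main

import (
	"fmt"
	"log"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	st, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Collect and sort the CPU IDs so the output is stable.
	ids := make([]int64, 0, len(st.CPU))
	for id := range st.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		c := st.CPU[id]
		fmt.Printf("cpu%d: user=%.2fs system=%.2fs idle=%.2fs\n", id, c.User, c.System, c.Idle)
	}
}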
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
index 15edc22..65fec83 100644
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -64,7 +64,7 @@
 	swapFields := strings.Fields(swapString)
 	swapLength := len(swapFields)
 	if swapLength < 5 {
-		return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
+		return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
 	}
 
 	swap := &Swap{
@@ -74,15 +74,15 @@
 
 	swap.Size, err = strconv.Atoi(swapFields[2])
 	if err != nil {
-		return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
+		return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
 	}
 	swap.Used, err = strconv.Atoi(swapFields[3])
 	if err != nil {
-		return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
+		return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
 	}
 	swap.Priority, err = strconv.Atoi(swapFields[4])
 	if err != nil {
-		return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
+		return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
 	}
 
 	return swap, nil
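These parsers now wrap the ErrFileParse/ErrFileRead sentinels into their errors, so callers can branch on the failure class with errors.Is instead of matching message text. An illustrative sketch using the swaps parser (struct fields printed with %+v to avoid assuming individual field names here):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		// The sentinel stays reachable through errors.Is even though the
		// message also carries the offending line and the strconv error.
		if errors.Is(err, procfs.ErrFileParse) {
			log.Fatalf("malformed /proc/swaps: %v", err)
		}
		log.Fatal(err)
	}
	for _, s := range swaps {
		fmt.Printf("%+v\n", s)
	}
}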
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 0000000..80e0e94
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Procs{}, err
+	}
+	return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+	taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+	d, err := os.Open(taskPath)
+	if err != nil {
+		return Procs{}, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+	}
+
+	t := Procs{}
+	for _, n := range names {
+		tid, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			continue
+		}
+
+		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+	}
+
+	return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+	taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+	if _, err := os.Stat(taskPath); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+	tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+	if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: tid, fs: tfs}, nil
+}
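The thread helpers expose /proc/PID/task/TID entries as ordinary Proc values, so the existing per-process parsers work unchanged per thread. A minimal sketch listing the threads of the calling process (import path as assumed above):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	threads, err := fs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		// Each thread is a Proc rooted at /proc/<pid>/task, so the usual
		// per-process readers (Stat, NewStatus, ...) apply unchanged.
		st, err := t.Stat()
		if err != nil {
			continue
		}
		fmt.Printf("tid=%d comm=%s state=%s\n", t.PID, st.Comm, st.State)
	}
}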
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index cb13891..51c49d8 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -11,13 +11,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package procfs
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -26,10 +26,12 @@
 )
 
 // The VM interface is described at
-//   https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+//	https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
 // Each setting is exposed as a single file.
 // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
-// and numa_zonelist_order (deprecated) which is a string
+// and numa_zonelist_order (deprecated) which is a string.
 type VM struct {
 	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
 	BlockDump                 *int64   // /proc/sys/vm/block_dump
@@ -84,10 +86,10 @@
 		return nil, err
 	}
 	if !file.Mode().IsDir() {
-		return nil, fmt.Errorf("%s is not a directory", path)
+		return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
 	}
 
-	files, err := ioutil.ReadDir(path)
+	files, err := os.ReadDir(path)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index 0b9bb67..e54d94b 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package procfs
@@ -18,7 +19,7 @@
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"regexp"
 	"strings"
 
@@ -72,13 +73,13 @@
 // structs containing the relevant info.  More information available here:
 // https://www.kernel.org/doc/Documentation/sysctl/vm.txt
 func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
+	data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
 	if err != nil {
-		return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+		return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
 	}
 	zoneinfo, err := parseZoneinfo(data)
 	if err != nil {
-		return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+		return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
 	}
 	return zoneinfo, nil
 }
@@ -99,7 +100,6 @@
 				continue
 			}
 			if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
-				zoneinfoElement.Zone = ""
 				continue
 			}
 			parts := strings.Fields(strings.TrimSpace(line))