[VOL-5486] Upgrade library versions
Change-Id: I8b4e88699e03f44ee13e467867f45ae3f0a63c4b
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 0000000..2346df1
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+ # Make output more digestible with quickfix in vim/emacs/etc.
+ sort-results: true
+ print-issued-lines: false
+
+linters:
+ # We'll track the golangci-lint default linters manually
+ # instead of letting them change without our control.
+ disable-all: true
+ enable:
+ # golangci-lint defaults:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+
+ # Our own extras:
+ - gofumpt
+ - nolintlint # lints nolint directives
+ - revive
+
+linters-settings:
+ govet:
+ # These govet checks are disabled by default, but they're useful.
+ enable:
+ - nilness
+ - reflectvaluecompare
+ - sortslice
+ - unusedwrite
+
+ errcheck:
+ exclude-functions:
+ # These methods cannot fail.
+ # They operate on an in-memory buffer.
+ - (*go.uber.org/zap/buffer.Buffer).Write
+ - (*go.uber.org/zap/buffer.Buffer).WriteByte
+ - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+ - (*go.uber.org/zap/zapio.Writer).Close
+ - (*go.uber.org/zap/zapio.Writer).Sync
+ - (*go.uber.org/zap/zapio.Writer).Write
+ # Write to zapio.Writer cannot fail,
+ # so io.WriteString on it cannot fail.
+ - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+ # Writing a plain string to a fmt.State cannot fail.
+ - io.WriteString(fmt.State)
+
+issues:
+ # Print all issues reported by all linters.
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+ # Don't ignore some of the issues that golangci-lint considers okay.
+ # This includes documenting all exported entities.
+ exclude-use-default: false
+
+ exclude-rules:
+ # Don't warn on unused parameters.
+ # Parameter names are useful; replacing them with '_' is undesirable.
+ - linters: [revive]
+ text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+ # staticcheck already has smarter checks for empty blocks.
+ # revive's empty-block linter has false positives.
+ # For example, as of writing this, the following is not allowed.
+ # for foo() { }
+ - linters: [revive]
+ text: 'empty-block: this block is empty, you can remove it'
+
+ # Ignore logger.Sync() errcheck failures in example_test.go
+ # since those are intended to be uncomplicated examples.
+ - linters: [errcheck]
+ path: example_test.go
+ text: 'Error return value of `logger.Sync` is not checked'
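The errcheck exclusions above exist because writes to zap's in-memory buffer never return a non-nil error. A minimal sketch of the call pattern those exclusions permit (the string values are made up for illustration):

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	// Get a pooled buffer; Free returns it to the pool when we're done.
	pool := buffer.NewPool()
	buf := pool.Get()
	defer buf.Free()

	buf.AppendString("hello")
	// Write and WriteByte operate on an in-memory slice and always return
	// a nil error, which is why errcheck is told not to flag the
	// unchecked calls.
	buf.Write([]byte(", world"))
	buf.WriteByte('!')

	fmt.Println(buf.String())
}
```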
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
index 3154a1e..4fea302 100644
--- a/vendor/go.uber.org/zap/.readme.tmpl
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -1,7 +1,15 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+<div align="center">
+
Blazing fast, structured, leveled logging in Go.
+
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
## Installation
`go get -u go.uber.org/zap`
@@ -92,18 +100,18 @@
<hr>
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
-pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
-[doc]: https://godoc.org/go.uber.org/zap
-[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/zap
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index 6c32101..6d6cd5f 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,4 +1,176 @@
# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+`Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+ current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+ arrays of objects. With these two constructors, you don't need to implement
+ `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+ `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+ `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+ These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+ logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+ `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+ a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
## 1.18.1 (28 Jun 2021)
@@ -51,6 +223,16 @@
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
## 1.16.0 (1 Sep 2020)
Bugfixes:
@@ -72,6 +254,17 @@
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
## 1.15.0 (23 Apr 2020)
Bugfixes:
@@ -88,6 +281,11 @@
Thanks to @danielbprice for their contributions to this release.
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
## 1.14.1 (14 Mar 2020)
Bugfixes:
@@ -100,6 +298,10 @@
Thanks to @YashishDua for their contributions to this release.
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
## 1.14.0 (20 Feb 2020)
Enhancements:
@@ -110,6 +312,11 @@
Thanks to @caibirdme for their contributions to this release.
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+
## 1.13.0 (13 Nov 2019)
Enhancements:
@@ -118,11 +325,15 @@
Thanks to @jbizzle for their contributions to this release.
+[#758]: https://github.com/uber-go/zap/pull/758
+
## 1.12.0 (29 Oct 2019)
Enhancements:
* [#751][]: Migrate to Go modules.
+[#751]: https://github.com/uber-go/zap/pull/751
+
## 1.11.0 (21 Oct 2019)
Enhancements:
@@ -131,6 +342,9 @@
Thanks to @juicemia, @uhthomas for their contributions to this release.
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+
## 1.10.0 (29 Apr 2019)
Bugfixes:
@@ -148,13 +362,21 @@
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release.
-## v1.9.1 (06 Aug 2018)
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+
+## 1.9.1 (06 Aug 2018)
Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices.
-## v1.9.0 (19 Jul 2018)
+[#614]: https://github.com/uber-go/zap/pull/614
+
+## 1.9.0 (19 Jul 2018)
Enhancements:
* [#602][]: Reduce number of allocations when logging with reflection.
@@ -163,7 +385,11 @@
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release.
-## v1.8.0 (13 Apr 2018)
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+
+## 1.8.0 (13 Apr 2018)
Enhancements:
* [#508][]: Make log level configurable when redirecting the standard
@@ -176,19 +402,28 @@
Thanks to @DiSiqueira and @djui for their contributions to this release.
-## v1.7.1 (25 Sep 2017)
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+
+## 1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
-## v1.7.0 (21 Sep 2017)
+[#504]: https://github.com/uber-go/zap/pull/504
+
+## 1.7.0 (21 Sep 2017)
Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages.
-## v1.6.0 (30 Aug 2017)
+[#487]: https://github.com/uber-go/zap/pull/487
+
+## 1.6.0 (30 Aug 2017)
Enhancements:
@@ -196,7 +431,10 @@
* [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests.
-## v1.5.0 (22 Jul 2017)
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+
+## 1.5.0 (22 Jul 2017)
Enhancements:
@@ -209,7 +447,12 @@
Thanks to @richard-tunein and @pavius for their contributions to this release.
-## v1.4.1 (08 Jun 2017)
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+
+## 1.4.1 (08 Jun 2017)
This release fixes two bugs.
@@ -218,7 +461,10 @@
* [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer.
-## v1.4.0 (12 May 2017)
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+
+## 1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@@ -230,7 +476,11 @@
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler.
-## v1.3.0 (25 Apr 2017)
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+
+## 1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -241,7 +491,10 @@
particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
-## v1.2.0 (13 Apr 2017)
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+
+## 1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@@ -250,7 +503,9 @@
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`.
-## v1.1.0 (31 Mar 2017)
+[#402]: https://github.com/uber-go/zap/pull/402
+
+## 1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
It is fully backward-compatible.
@@ -267,7 +522,11 @@
Thanks to @moitias for contributing to this release.
-## v1.0.0 (14 Mar 2017)
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+
+## 1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
further breaking changes will be made in the 1.x release series. Anyone using a
@@ -312,7 +571,21 @@
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release.
-## v1.0.0-rc.3 (7 Mar 2017)
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+
+## 1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
breaking changes.
@@ -333,7 +606,12 @@
Thanks to @ansel1 and @suyash for their contributions to this release.
-## v1.0.0-rc.2 (21 Feb 2017)
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+
+## 1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
breaking changes.
@@ -370,7 +648,16 @@
Thanks to @skipor and @chapsuk for their contributions to this release.
-## v1.0.0-rc.1 (14 Feb 2017)
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+
+## 1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
breaking changes and improvements from the pre-release version. Most notably:
@@ -390,7 +677,7 @@
* Sampling is more accurate, and doesn't depend on the standard library's shared
timer heap.
-## v0.1.0-beta.1 (6 Feb 2017)
+## 0.1.0-beta.1 (6 Feb 2017)
This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
upgrade at their leisure. Since this is the first tagged release, there are no
@@ -398,95 +685,3 @@
Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release.
-
-[#316]: https://github.com/uber-go/zap/pull/316
-[#309]: https://github.com/uber-go/zap/pull/309
-[#317]: https://github.com/uber-go/zap/pull/317
-[#321]: https://github.com/uber-go/zap/pull/321
-[#325]: https://github.com/uber-go/zap/pull/325
-[#333]: https://github.com/uber-go/zap/pull/333
-[#326]: https://github.com/uber-go/zap/pull/326
-[#300]: https://github.com/uber-go/zap/pull/300
-[#339]: https://github.com/uber-go/zap/pull/339
-[#307]: https://github.com/uber-go/zap/pull/307
-[#353]: https://github.com/uber-go/zap/pull/353
-[#311]: https://github.com/uber-go/zap/pull/311
-[#366]: https://github.com/uber-go/zap/pull/366
-[#364]: https://github.com/uber-go/zap/pull/364
-[#371]: https://github.com/uber-go/zap/pull/371
-[#362]: https://github.com/uber-go/zap/pull/362
-[#369]: https://github.com/uber-go/zap/pull/369
-[#347]: https://github.com/uber-go/zap/pull/347
-[#373]: https://github.com/uber-go/zap/pull/373
-[#348]: https://github.com/uber-go/zap/pull/348
-[#327]: https://github.com/uber-go/zap/pull/327
-[#376]: https://github.com/uber-go/zap/pull/376
-[#346]: https://github.com/uber-go/zap/pull/346
-[#365]: https://github.com/uber-go/zap/pull/365
-[#372]: https://github.com/uber-go/zap/pull/372
-[#385]: https://github.com/uber-go/zap/pull/385
-[#396]: https://github.com/uber-go/zap/pull/396
-[#386]: https://github.com/uber-go/zap/pull/386
-[#402]: https://github.com/uber-go/zap/pull/402
-[#415]: https://github.com/uber-go/zap/pull/415
-[#416]: https://github.com/uber-go/zap/pull/416
-[#424]: https://github.com/uber-go/zap/pull/424
-[#425]: https://github.com/uber-go/zap/pull/425
-[#431]: https://github.com/uber-go/zap/pull/431
-[#435]: https://github.com/uber-go/zap/pull/435
-[#444]: https://github.com/uber-go/zap/pull/444
-[#477]: https://github.com/uber-go/zap/pull/477
-[#465]: https://github.com/uber-go/zap/pull/465
-[#460]: https://github.com/uber-go/zap/pull/460
-[#470]: https://github.com/uber-go/zap/pull/470
-[#487]: https://github.com/uber-go/zap/pull/487
-[#490]: https://github.com/uber-go/zap/pull/490
-[#491]: https://github.com/uber-go/zap/pull/491
-[#504]: https://github.com/uber-go/zap/pull/504
-[#508]: https://github.com/uber-go/zap/pull/508
-[#518]: https://github.com/uber-go/zap/pull/518
-[#577]: https://github.com/uber-go/zap/pull/577
-[#574]: https://github.com/uber-go/zap/pull/574
-[#602]: https://github.com/uber-go/zap/pull/602
-[#572]: https://github.com/uber-go/zap/pull/572
-[#606]: https://github.com/uber-go/zap/pull/606
-[#614]: https://github.com/uber-go/zap/pull/614
-[#657]: https://github.com/uber-go/zap/pull/657
-[#706]: https://github.com/uber-go/zap/pull/706
-[#610]: https://github.com/uber-go/zap/pull/610
-[#675]: https://github.com/uber-go/zap/pull/675
-[#704]: https://github.com/uber-go/zap/pull/704
-[#725]: https://github.com/uber-go/zap/pull/725
-[#736]: https://github.com/uber-go/zap/pull/736
-[#751]: https://github.com/uber-go/zap/pull/751
-[#758]: https://github.com/uber-go/zap/pull/758
-[#771]: https://github.com/uber-go/zap/pull/771
-[#773]: https://github.com/uber-go/zap/pull/773
-[#775]: https://github.com/uber-go/zap/pull/775
-[#786]: https://github.com/uber-go/zap/pull/786
-[#791]: https://github.com/uber-go/zap/pull/791
-[#795]: https://github.com/uber-go/zap/pull/795
-[#799]: https://github.com/uber-go/zap/pull/799
-[#804]: https://github.com/uber-go/zap/pull/804
-[#812]: https://github.com/uber-go/zap/pull/812
-[#806]: https://github.com/uber-go/zap/pull/806
-[#813]: https://github.com/uber-go/zap/pull/813
-[#629]: https://github.com/uber-go/zap/pull/629
-[#697]: https://github.com/uber-go/zap/pull/697
-[#828]: https://github.com/uber-go/zap/pull/828
-[#835]: https://github.com/uber-go/zap/pull/835
-[#843]: https://github.com/uber-go/zap/pull/843
-[#844]: https://github.com/uber-go/zap/pull/844
-[#852]: https://github.com/uber-go/zap/pull/852
-[#854]: https://github.com/uber-go/zap/pull/854
-[#861]: https://github.com/uber-go/zap/pull/861
-[#862]: https://github.com/uber-go/zap/pull/862
-[#865]: https://github.com/uber-go/zap/pull/865
-[#867]: https://github.com/uber-go/zap/pull/867
-[#881]: https://github.com/uber-go/zap/pull/881
-[#903]: https://github.com/uber-go/zap/pull/903
-[#912]: https://github.com/uber-go/zap/pull/912
-[#913]: https://github.com/uber-go/zap/pull/913
-[#928]: https://github.com/uber-go/zap/pull/928
-[#931]: https://github.com/uber-go/zap/pull/931
-[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
index 5cd9656..ea02f3c 100644
--- a/vendor/go.uber.org/zap/CONTRIBUTING.md
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -16,7 +16,7 @@
[Fork][fork], then clone the repository:
-```
+```bash
mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git
@@ -27,21 +27,16 @@
Make sure that the tests and the linters pass:
-```
+```bash
make test
make lint
```
-If you're not using the minor version of Go specified in the Makefile's
-`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
-fine, but it means that you'll only discover lint failures after you open your
-pull request.
-
## Making Changes
Start by creating a new branch for your changes:
-```
+```bash
cd $GOPATH/src/go.uber.org/zap
git checkout master
git fetch upstream
@@ -52,22 +47,22 @@
Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork.
-```
+```bash
git push origin cool_new_feature
```
Then use the GitHub UI to open a pull request.
-At this point, you're waiting on us to review your changes. We *try* to respond
+At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them.
We're much more likely to approve your changes if you:
-* Add tests for new functionality.
-* Write a [good commit message][commit-message].
-* Maintain backward compatibility.
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE
similarity index 100%
rename from vendor/go.uber.org/zap/LICENSE.txt
rename to vendor/go.uber.org/zap/LICENSE
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
index 9b1bc3b..eb1cee5 100644
--- a/vendor/go.uber.org/zap/Makefile
+++ b/vendor/go.uber.org/zap/Makefile
@@ -1,50 +1,51 @@
-export GOBIN ?= $(shell pwd)/bin
+# Directory containing the Makefile.
+PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
-GOLINT = $(GOBIN)/golint
-STATICCHECK = $(GOBIN)/staticcheck
+export GOBIN ?= $(PROJECT_ROOT)/bin
+export PATH := $(GOBIN):$(PATH)
+
+GOVULNCHECK = $(GOBIN)/govulncheck
BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
# Directories containing independent Go modules.
-#
-# We track coverage only for the main module.
-MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
-# Many Go tools take file globs or directories as arguments instead of packages.
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
+# Directories that we want to track coverage for.
+COVER_DIRS = . ./exp
.PHONY: all
all: lint test
.PHONY: lint
-lint: $(GOLINT) $(STATICCHECK)
- @rm -rf lint.log
- @echo "Checking formatting..."
- @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
- @echo "Checking vet..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking lint..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking staticcheck..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking for unresolved FIXMEs..."
- @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
- @echo "Checking for license headers..."
- @./checklicense.sh | tee -a lint.log
- @[ ! -s lint.log ]
- @echo "Checking 'go mod tidy'..."
- @make tidy
- @if ! git diff --quiet; then \
- echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
- git --no-pager diff; \
- fi
+lint: golangci-lint tidy-lint license-lint
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
+.PHONY: golangci-lint
+golangci-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] golangci-lint: $(mod)" && \
+ golangci-lint run --path-prefix $(mod)) &&) true
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS), \
+ (cd $(dir) && go mod tidy) &&) true
+
+.PHONY: tidy-lint
+tidy-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] tidy: $(mod)" && \
+ go mod tidy && \
+ git diff --exit-code -- go.mod go.sum) &&) true
+
+
+.PHONY: license-lint
+license-lint:
+ ./checklicense.sh
+
+$(GOVULNCHECK):
+ cd tools && go install golang.org/x/vuln/cmd/govulncheck
.PHONY: test
test:
@@ -52,8 +53,10 @@
.PHONY: cover
cover:
- go test -race -coverprofile=cover.out -coverpkg=./... ./...
- go tool cover -html=cover.out -o cover.html
+ @$(foreach dir,$(COVER_DIRS), ( \
+ cd $(dir) && \
+ go test -race -coverprofile=cover.out -coverpkg=./... ./... \
+ && go tool cover -html=cover.out -o cover.html) &&) true
.PHONY: bench
BENCH ?= .
@@ -68,6 +71,6 @@
rm -f README.md
cat .readme.tmpl | go run internal/readme/readme.go > README.md
-.PHONY: tidy
-tidy:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
+.PHONY: vulncheck
+vulncheck: $(GOVULNCHECK)
+ $(GOVULNCHECK) ./...
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
index 1e64d6c..a17035c 100644
--- a/vendor/go.uber.org/zap/README.md
+++ b/vendor/go.uber.org/zap/README.md
@@ -1,7 +1,16 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# :zap: zap
+
+
+<div align="center">
Blazing fast, structured, leveled logging in Go.
+
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
## Installation
`go get -u go.uber.org/zap`
@@ -66,38 +75,44 @@
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 862 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
-| zerolog | 4021 ns/op | +366% | 76 allocs/op
-| go-kit | 4542 ns/op | +427% | 105 allocs/op
-| apex/log | 26785 ns/op | +3007% | 115 allocs/op
-| logrus | 29501 ns/op | +3322% | 125 allocs/op
-| log15 | 29906 ns/op | +3369% | 122 allocs/op
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 126 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
-| zerolog | 88 ns/op | -30% | 0 allocs/op
-| go-kit | 5087 ns/op | +3937% | 103 allocs/op
-| log15 | 18548 ns/op | +14621% | 73 allocs/op
-| apex/log | 26012 ns/op | +20544% | 104 allocs/op
-| logrus | 27236 ns/op | +21516% | 113 allocs/op
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 118 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
-| zerolog | 93 ns/op | -21% | 0 allocs/op
-| go-kit | 280 ns/op | +137% | 11 allocs/op
-| standard library | 499 ns/op | +323% | 2 allocs/op
-| apex/log | 1990 ns/op | +1586% | 10 allocs/op
-| logrus | 3129 ns/op | +2552% | 24 allocs/op
-| log15 | 3887 ns/op | +3194% | 23 allocs/op
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
## Development Status: Stable
@@ -117,7 +132,7 @@
<hr>
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
index 5be3704..abfccb5 100644
--- a/vendor/go.uber.org/zap/array.go
+++ b/vendor/go.uber.org/zap/array.go
@@ -21,6 +21,7 @@
package zap
import (
+ "fmt"
"time"
"go.uber.org/zap/zapcore"
@@ -94,11 +95,137 @@
return Array(key, int8s(nums))
}
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, which we have
+ // to convert "*T" to explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss))
}
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
+
// Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field {
return Array(key, times(ts))
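The generic constructors added above (Objects, ObjectValues, Stringers) can be exercised with a small program. This is a minimal sketch; the Author type and field values are made up for illustration:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author implements zapcore.ObjectMarshaler on the value receiver,
// so a []Author can be logged with zap.Objects.
type Author struct {
	Name string
}

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	authors := []Author{{Name: "alice"}, {Name: "bob"}}
	logger.Info("loading article", zap.Objects("authors", authors))
}
```

If MarshalLogObject were declared on *Author instead, the same slice would be logged with zap.ObjectValues, as the doc comments above describe.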
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
index 9e929cd..0b8540c 100644
--- a/vendor/go.uber.org/zap/buffer/buffer.go
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -42,6 +42,11 @@
b.bs = append(b.bs, v)
}
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+ b.bs = append(b.bs, v...)
+}
+
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
index 8fb3e20..8463233 100644
--- a/vendor/go.uber.org/zap/buffer/pool.go
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -20,25 +20,29 @@
package buffer
-import "sync"
+import (
+ "go.uber.org/zap/internal/pool"
+)
// A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct {
- p *sync.Pool
+ p *pool.Pool[*Buffer]
}
// NewPool constructs a new Pool.
func NewPool() Pool {
- return Pool{p: &sync.Pool{
- New: func() interface{} {
- return &Buffer{bs: make([]byte, 0, _size)}
- },
- }}
+ return Pool{
+ p: pool.New(func() *Buffer {
+ return &Buffer{
+ bs: make([]byte, 0, _size),
+ }
+ }),
+ }
}
// Get retrieves a Buffer from the pool, creating one if necessary.
func (p Pool) Get() *Buffer {
- buf := p.p.Get().(*Buffer)
+ buf := p.p.Get()
buf.Reset()
buf.pool = p
return buf
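The rewritten Pool above delegates to go.uber.org/zap/internal/pool, which is internal to zap and not importable from other modules. As a rough illustration of the pattern it relies on, here is a minimal type-safe generic wrapper around sync.Pool; the real internal package may differ in detail:

```go
package pool

import "sync"

// Pool is a sketch of a generic, type-safe wrapper around sync.Pool.
type Pool[T any] struct {
	pool sync.Pool
}

// New builds a Pool that uses fn to construct new values.
func New[T any](fn func() T) *Pool[T] {
	return &Pool[T]{
		pool: sync.Pool{
			New: func() any { return fn() },
		},
	}
}

// Get returns a value from the pool, constructing one if necessary.
func (p *Pool[T]) Get() T {
	return p.pool.Get().(T)
}

// Put returns a value to the pool for reuse.
func (p *Pool[T]) Put(x T) {
	p.pool.Put(x)
}
```

This removes the interface{} type assertion from callers such as buffer.Pool.Get, which is the change visible in the hunk above.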
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
index 55637fb..e76e4e6 100644
--- a/vendor/go.uber.org/zap/config.go
+++ b/vendor/go.uber.org/zap/config.go
@@ -21,7 +21,7 @@
package zap
import (
- "fmt"
+ "errors"
"sort"
"time"
@@ -95,6 +95,32 @@
// NewProductionEncoderConfig returns an opinionated EncoderConfig for
// production environments.
+//
+// Messages encoded with this configuration will be JSON-formatted
+// and will have the following keys by default:
+//
+// - "level": The logging level (e.g. "info", "error").
+// - "ts": The current time in number of seconds since the Unix epoch.
+// - "msg": The message passed to the log statement.
+// - "caller": If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - "stacktrace": If available, a stack trace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted as floating-point number of seconds since the Unix
+// epoch.
+// - Duration is formatted as floating-point number of seconds.
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewProductionEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewProductionEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
TimeKey: "ts",
@@ -112,11 +138,22 @@
}
}
-// NewProductionConfig is a reasonable production logging configuration.
-// Logging is enabled at InfoLevel and above.
+// NewProductionConfig builds a reasonable default production logging
+// configuration.
+// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of ErrorLevel and above.
+// DPanicLevel logs will not panic, but will write a stacktrace.
//
-// It uses a JSON encoder, writes to standard error, and enables sampling.
-// Stacktraces are automatically included on logs of ErrorLevel and above.
+// Sampling is enabled at 100:100 by default,
+// meaning that after the first 100 log entries
+// with the same level and message in the same second,
+// it will log every 100th entry
+// with the same level and message in the same second.
+// You may disable this behavior by setting Sampling to nil.
+//
+// See [NewProductionEncoderConfig] for information
+// on the default encoder configuration.
func NewProductionConfig() Config {
return Config{
Level: NewAtomicLevelAt(InfoLevel),
@@ -134,6 +171,32 @@
// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
// development environments.
+//
+// Messages encoded with this configuration will use Zap's console encoder
+// intended to print human-readable output.
+// It will print log messages with the following information:
+//
+// - The log level (e.g. "INFO", "ERROR").
+// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - The message passed to the log statement.
+// - If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - If available, a stacktrace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - Duration is formatted as a string (e.g. "1.234s").
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewDevelopmentEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
// Keys can be anything except the empty string.
@@ -152,12 +215,15 @@
}
}
-// NewDevelopmentConfig is a reasonable development logging configuration.
-// Logging is enabled at DebugLevel and above.
+// NewDevelopmentConfig builds a reasonable default development logging
+// configuration.
+// Logging is enabled at DebugLevel and above, and uses a console encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of WarnLevel and above.
+// DPanicLevel logs will panic.
//
-// It enables development mode (which makes DPanicLevel logs panic), uses a
-// console encoder, writes to standard error, and disables sampling.
-// Stacktraces are automatically included on logs of WarnLevel and above.
+// See [NewDevelopmentEncoderConfig] for information
+// on the default encoder configuration.
func NewDevelopmentConfig() Config {
return Config{
Level: NewAtomicLevelAt(DebugLevel),
@@ -182,7 +248,7 @@
}
if cfg.Level == (AtomicLevel{}) {
- return nil, fmt.Errorf("missing Level")
+ return nil, errors.New("missing Level")
}
log := New(
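The expanded doc comments above describe the default sampler and time encoding of the production preset. A minimal sketch of overriding both through zap's public Config API:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()
	// Disable the default 100:100 sampler described in the doc comment.
	cfg.Sampling = nil
	// Switch the timestamp from epoch seconds to ISO8601.
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("configured production logger")
}
```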
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
index 8638dd1..3c50d7b 100644
--- a/vendor/go.uber.org/zap/doc.go
+++ b/vendor/go.uber.org/zap/doc.go
@@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
-// Choosing a Logger
+// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
@@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
-// sugar := zap.NewExample().Sugar()
-// defer sugar.Sync()
-// sugar.Infow("failed to fetch URL",
-// "url", "http://example.com",
-// "attempt", 3,
-// "backoff", time.Second,
-// )
-// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
@@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// logger.Info("failed to fetch URL",
-// zap.String("url", "http://example.com"),
-// zap.Int("attempt", 3),
-// zap.Duration("backoff", time.Second),
-// )
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// sugar := logger.Sugar()
-// plain := sugar.Desugar()
//
-// Configuring Zap
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
-// logger, err := zap.NewProduction()
-// if err != nil {
-// log.Fatalf("can't initialize zap logger: %v", err)
-// }
-// defer logger.Sync()
+//
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
@@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
-// Extending Zap
+// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
-// Frequently Asked Questions
+// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
index 08ed833..caa04ce 100644
--- a/vendor/go.uber.org/zap/encoder.go
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -63,7 +63,7 @@
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
- return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+ return nil, errors.New("missing EncodeTime in EncoderConfig")
}
_encoderMutex.RLock()
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
index 65982a5..45f7b83 100644
--- a/vendor/go.uber.org/zap/error.go
+++ b/vendor/go.uber.org/zap/error.go
@@ -21,14 +21,13 @@
package zap
import (
- "sync"
-
+ "go.uber.org/zap/internal/pool"
"go.uber.org/zap/zapcore"
)
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
-}}
+})
// Error is shorthand for the common idiom NamedError("error", err).
func Error(err error) Field {
@@ -60,11 +59,14 @@
// potentially an "errorVerbose" attribute, we need to wrap it in a
// type that implements LogObjectMarshaler. To prevent this from
// allocating, pool the wrapper type.
- elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem := _errArrayElemPool.Get()
elem.error = errs[i]
- arr.AppendObject(elem)
+ err := arr.AppendObject(elem)
elem.error = nil
_errArrayElemPool.Put(elem)
+ if err != nil {
+ return err
+ }
}
return nil
}
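For context, the array encoder above is what backs zap.Errors; a minimal usage sketch (the error messages are made up):

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	errs := []error{
		errors.New("connection reset"),
		errors.New("request timed out"),
	}
	// Errors encodes the slice as an array of error objects; the change
	// above propagates any AppendObject failure instead of dropping it.
	logger.Error("batch failed", zap.Errors("causes", errs))
}
```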
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
index bbb745d..6743930 100644
--- a/vendor/go.uber.org/zap/field.go
+++ b/vendor/go.uber.org/zap/field.go
@@ -25,6 +25,7 @@
"math"
"time"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -374,7 +375,7 @@
// from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay.
- return String(key, takeStacktrace(skip+1)) // skip StackSkip
+ return String(key, stacktrace.Take(skip+1)) // skip StackSkip
}
// Duration constructs a field with the given key and value. The encoder
@@ -410,6 +411,65 @@
}
}
+// Dict constructs a field containing the provided key-value pairs.
+// It acts similar to [Object], but with the fields specified as arguments.
+func Dict(key string, val ...Field) Field {
+ return dictField(key, val)
+}
+
+// We need a function with the signature (string, T) for zap.Any.
+func dictField(key string, val []Field) Field {
+ return Object(key, dictObject(val))
+}
+
+type dictObject []Field
+
+func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ for _, f := range d {
+ f.AddTo(enc)
+ }
+ return nil
+}
+
+// We discovered an issue where zap.Any can cause a performance degradation
+// when used in new goroutines.
+//
+// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
+// switch statement) of stack space for zap.Any when it takes the form:
+//
+// switch v := v.(type) {
+// case string:
+// return String(key, v)
+// case int:
+// return Int(key, v)
+// // ...
+// default:
+// return Reflect(key, v)
+// }
+//
+// To avoid this, we use the type switch to assign a value to a single local variable
+// and then call a function on it.
+// The local variable is just a function reference so it doesn't allocate
+// when converted to an interface{}.
+//
+// A fair bit of experimentation went into this.
+// See also:
+//
+// - https://github.com/uber-go/zap/pull/1301
+// - https://github.com/uber-go/zap/pull/1303
+// - https://github.com/uber-go/zap/pull/1304
+// - https://github.com/uber-go/zap/pull/1305
+// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for upstream issue.
+type anyFieldC[T any] func(string, T) Field
+
+func (f anyFieldC[T]) Any(key string, val any) Field {
+ v, _ := val.(T)
+ // val is guaranteed to be a T, except when it's nil.
+ return f(key, v)
+}
+
// Any takes a key and an arbitrary value and chooses the best way to represent
// them as a field, falling back to a reflection-based approach only if
// necessary.
@@ -418,132 +478,138 @@
// them. To minimize surprises, []byte values are treated as binary blobs, byte
// values are treated as uint8, and runes are always treated as integers.
func Any(key string, value interface{}) Field {
- switch val := value.(type) {
+ var c interface{ Any(string, any) Field }
+
+ switch value.(type) {
case zapcore.ObjectMarshaler:
- return Object(key, val)
+ c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler:
- return Array(key, val)
+ c = anyFieldC[zapcore.ArrayMarshaler](Array)
+ case []Field:
+ c = anyFieldC[[]Field](dictField)
case bool:
- return Bool(key, val)
+ c = anyFieldC[bool](Bool)
case *bool:
- return Boolp(key, val)
+ c = anyFieldC[*bool](Boolp)
case []bool:
- return Bools(key, val)
+ c = anyFieldC[[]bool](Bools)
case complex128:
- return Complex128(key, val)
+ c = anyFieldC[complex128](Complex128)
case *complex128:
- return Complex128p(key, val)
+ c = anyFieldC[*complex128](Complex128p)
case []complex128:
- return Complex128s(key, val)
+ c = anyFieldC[[]complex128](Complex128s)
case complex64:
- return Complex64(key, val)
+ c = anyFieldC[complex64](Complex64)
case *complex64:
- return Complex64p(key, val)
+ c = anyFieldC[*complex64](Complex64p)
case []complex64:
- return Complex64s(key, val)
+ c = anyFieldC[[]complex64](Complex64s)
case float64:
- return Float64(key, val)
+ c = anyFieldC[float64](Float64)
case *float64:
- return Float64p(key, val)
+ c = anyFieldC[*float64](Float64p)
case []float64:
- return Float64s(key, val)
+ c = anyFieldC[[]float64](Float64s)
case float32:
- return Float32(key, val)
+ c = anyFieldC[float32](Float32)
case *float32:
- return Float32p(key, val)
+ c = anyFieldC[*float32](Float32p)
case []float32:
- return Float32s(key, val)
+ c = anyFieldC[[]float32](Float32s)
case int:
- return Int(key, val)
+ c = anyFieldC[int](Int)
case *int:
- return Intp(key, val)
+ c = anyFieldC[*int](Intp)
case []int:
- return Ints(key, val)
+ c = anyFieldC[[]int](Ints)
case int64:
- return Int64(key, val)
+ c = anyFieldC[int64](Int64)
case *int64:
- return Int64p(key, val)
+ c = anyFieldC[*int64](Int64p)
case []int64:
- return Int64s(key, val)
+ c = anyFieldC[[]int64](Int64s)
case int32:
- return Int32(key, val)
+ c = anyFieldC[int32](Int32)
case *int32:
- return Int32p(key, val)
+ c = anyFieldC[*int32](Int32p)
case []int32:
- return Int32s(key, val)
+ c = anyFieldC[[]int32](Int32s)
case int16:
- return Int16(key, val)
+ c = anyFieldC[int16](Int16)
case *int16:
- return Int16p(key, val)
+ c = anyFieldC[*int16](Int16p)
case []int16:
- return Int16s(key, val)
+ c = anyFieldC[[]int16](Int16s)
case int8:
- return Int8(key, val)
+ c = anyFieldC[int8](Int8)
case *int8:
- return Int8p(key, val)
+ c = anyFieldC[*int8](Int8p)
case []int8:
- return Int8s(key, val)
+ c = anyFieldC[[]int8](Int8s)
case string:
- return String(key, val)
+ c = anyFieldC[string](String)
case *string:
- return Stringp(key, val)
+ c = anyFieldC[*string](Stringp)
case []string:
- return Strings(key, val)
+ c = anyFieldC[[]string](Strings)
case uint:
- return Uint(key, val)
+ c = anyFieldC[uint](Uint)
case *uint:
- return Uintp(key, val)
+ c = anyFieldC[*uint](Uintp)
case []uint:
- return Uints(key, val)
+ c = anyFieldC[[]uint](Uints)
case uint64:
- return Uint64(key, val)
+ c = anyFieldC[uint64](Uint64)
case *uint64:
- return Uint64p(key, val)
+ c = anyFieldC[*uint64](Uint64p)
case []uint64:
- return Uint64s(key, val)
+ c = anyFieldC[[]uint64](Uint64s)
case uint32:
- return Uint32(key, val)
+ c = anyFieldC[uint32](Uint32)
case *uint32:
- return Uint32p(key, val)
+ c = anyFieldC[*uint32](Uint32p)
case []uint32:
- return Uint32s(key, val)
+ c = anyFieldC[[]uint32](Uint32s)
case uint16:
- return Uint16(key, val)
+ c = anyFieldC[uint16](Uint16)
case *uint16:
- return Uint16p(key, val)
+ c = anyFieldC[*uint16](Uint16p)
case []uint16:
- return Uint16s(key, val)
+ c = anyFieldC[[]uint16](Uint16s)
case uint8:
- return Uint8(key, val)
+ c = anyFieldC[uint8](Uint8)
case *uint8:
- return Uint8p(key, val)
+ c = anyFieldC[*uint8](Uint8p)
case []byte:
- return Binary(key, val)
+ c = anyFieldC[[]byte](Binary)
case uintptr:
- return Uintptr(key, val)
+ c = anyFieldC[uintptr](Uintptr)
case *uintptr:
- return Uintptrp(key, val)
+ c = anyFieldC[*uintptr](Uintptrp)
case []uintptr:
- return Uintptrs(key, val)
+ c = anyFieldC[[]uintptr](Uintptrs)
case time.Time:
- return Time(key, val)
+ c = anyFieldC[time.Time](Time)
case *time.Time:
- return Timep(key, val)
+ c = anyFieldC[*time.Time](Timep)
case []time.Time:
- return Times(key, val)
+ c = anyFieldC[[]time.Time](Times)
case time.Duration:
- return Duration(key, val)
+ c = anyFieldC[time.Duration](Duration)
case *time.Duration:
- return Durationp(key, val)
+ c = anyFieldC[*time.Duration](Durationp)
case []time.Duration:
- return Durations(key, val)
+ c = anyFieldC[[]time.Duration](Durations)
case error:
- return NamedError(key, val)
+ c = anyFieldC[error](NamedError)
case []error:
- return Errors(key, val)
+ c = anyFieldC[[]error](Errors)
case fmt.Stringer:
- return Stringer(key, val)
+ c = anyFieldC[fmt.Stringer](Stringer)
default:
- return Reflect(key, val)
+ c = anyFieldC[any](Reflect)
}
+
+ return c.Any(key, value)
}
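
For illustration only (not part of this patch): a minimal sketch of call sites that exercise the rewritten Any above. Everything here is the public zap API; the message and keys are made up.

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        // Each Any call still runs the type switch above, but now it only
        // binds a small anyFieldC constructor instead of building a
        // zap.Field in every switch arm.
        logger.Info("request handled",
            zap.Any("user", "alice"),       // dispatches to String
            zap.Any("attempts", 3),         // dispatches to Int
            zap.Any("latencies", []int{1}), // dispatches to Ints
        )
    }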
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
index c1ac050..3cb46c9 100644
--- a/vendor/go.uber.org/zap/global.go
+++ b/vendor/go.uber.org/zap/global.go
@@ -31,6 +31,7 @@
)
const (
+ _stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go
deleted file mode 100644
index 6b5dbda..0000000
--- a/vendor/go.uber.org/zap/global_go112.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// See #682 for more information.
-// +build go1.12
-
-package zap
-
-const _stdLogDefaultDepth = 1
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
index 1297c33..2be8f65 100644
--- a/vendor/go.uber.org/zap/http_handler.go
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -22,6 +22,7 @@
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -32,22 +33,23 @@
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
-// GET
+// # GET
//
// The GET request returns a JSON description of the current logging level like:
-// {"level":"info"}
//
-// PUT
+// {"level":"info"}
+//
+// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
-// Content-Type: application/x-www-form-urlencoded
+// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
-// level=debug
+// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
@@ -55,19 +57,25 @@
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
-// curl -X PUT localhost:8080/log/level?level=debug
-// curl -X PUT localhost:8080/log/level -d level=debug
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
-// {"level":"info"}
+// {"level":"info"}
//
// An example curl request could look like this:
//
-// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
-//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if err := lvl.serveHTTP(w, r); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "internal error: %v", err)
+ }
+}
+
+func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct {
Error string `json:"error"`
}
@@ -79,19 +87,20 @@
switch r.Method {
case http.MethodGet:
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
- enc.Encode(errorResponse{Error: err.Error()})
- return
+ return enc.Encode(errorResponse{Error: err.Error()})
}
lvl.SetLevel(requestedLvl)
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
default:
w.WriteHeader(http.StatusMethodNotAllowed)
- enc.Encode(errorResponse{
+ return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.",
})
}
@@ -108,7 +117,7 @@
func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level")
if lvl == "" {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil {
@@ -125,8 +134,7 @@
return 0, fmt.Errorf("malformed request body: %v", err)
}
if pld.Level == nil {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
return *pld.Level, nil
-
}
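
For illustration only (not in the patch): the refactored handler is still wired up as a plain http.Handler; a minimal sketch, with the route and port taken from the curl examples above.

    package main

    import (
        "net/http"

        "go.uber.org/zap"
    )

    func main() {
        lvl := zap.NewAtomicLevel() // starts at InfoLevel

        mux := http.NewServeMux()
        // GET reports the current level; PUT changes it at runtime.
        mux.Handle("/log/level", lvl)

        _ = http.ListenAndServe("localhost:8080", mux)
    }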
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
index dfc5b05..f673f99 100644
--- a/vendor/go.uber.org/zap/internal/exit/exit.go
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -24,24 +24,25 @@
import "os"
-var real = func() { os.Exit(1) }
+var _exit = os.Exit
-// Exit normally terminates the process by calling os.Exit(1). If the package
-// is stubbed, it instead records a call in the testing spy.
-func Exit() {
- real()
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+ _exit(code)
}
// A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct {
Exited bool
- prev func()
+ Code int
+ prev func(code int)
}
// Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit {
- s := &StubbedExit{prev: real}
- real = s.exit
+ s := &StubbedExit{prev: _exit}
+ _exit = s.exit
return s
}
@@ -56,9 +57,10 @@
// Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() {
- real = se.prev
+ _exit = se.prev
}
-func (se *StubbedExit) exit() {
+func (se *StubbedExit) exit(code int) {
se.Exited = true
+ se.Code = code
}
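
For illustration only (not in the patch): the code-aware stub is used as a testing spy. This is an internal package, so the sketch below only compiles inside the zap module; the test name is made up.

    package exit_test

    import (
        "testing"

        "go.uber.org/zap/internal/exit"
    )

    func TestWithReportsCode(t *testing.T) {
        stub := exit.Stub()
        defer stub.Unstub()

        exit.With(3) // would call os.Exit(3) if not stubbed

        if !stub.Exited || stub.Code != 3 {
            t.Fatalf("want exit code 3, got exited=%v code=%d", stub.Exited, stub.Code)
        }
    }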
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/internal/level_enabler.go
similarity index 65%
rename from vendor/go.uber.org/zap/global_prego112.go
rename to vendor/go.uber.org/zap/internal/level_enabler.go
index d3ab9af..40bfed8 100644
--- a/vendor/go.uber.org/zap/global_prego112.go
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,20 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build !go1.12
+// Package internal and its subpackages hold types and functionality
+// that are not part of Zap's public API.
+package internal
-package zap
+import "go.uber.org/zap/zapcore"
-const _stdLogDefaultDepth = 2
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined for more convenient use in tests and
+// non-zapcore packages.
+// This cannot be imported from zapcore because of the cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go
new file mode 100644
index 0000000..60e9d2c
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/pool/pool.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package pool provides internal pool utilities.
+package pool
+
+import (
+ "sync"
+)
+
+// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
+// object pooling.
+//
+// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
+// not be detected, so all internal pool use must take care to only store
+// pointer types.
+type Pool[T any] struct {
+ pool sync.Pool
+}
+
+// New returns a new [Pool] for T, and will use fn to construct new Ts when
+// the pool is empty.
+func New[T any](fn func() T) *Pool[T] {
+ return &Pool[T]{
+ pool: sync.Pool{
+ New: func() any {
+ return fn()
+ },
+ },
+ }
+}
+
+// Get gets a T from the pool, or creates a new one if the pool is empty.
+func (p *Pool[T]) Get() T {
+ return p.pool.Get().(T)
+}
+
+// Put returns x into the pool.
+func (p *Pool[T]) Put(x T) {
+ p.pool.Put(x)
+}
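
For illustration only (not in the patch): a minimal sketch of the typed pool. The scratch type is hypothetical, and the package is internal, so this only compiles inside the zap module; note the SA6002 caveat above about storing only pointer types.

    package main

    import (
        "fmt"

        "go.uber.org/zap/internal/pool"
    )

    // scratch is a hypothetical pooled type; only the pointer is stored.
    type scratch struct{ buf []byte }

    var _scratchPool = pool.New(func() *scratch {
        return &scratch{buf: make([]byte, 0, 1024)}
    })

    func main() {
        s := _scratchPool.Get() // typed: *scratch, no type assertion needed
        s.buf = append(s.buf[:0], "hello"...)
        fmt.Println(string(s.buf))
        _scratchPool.Put(s)
    }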
diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
new file mode 100644
index 0000000..82af755
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
+
+import (
+ "runtime"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
+)
+
+var _stackPool = pool.New(func() *Stack {
+ return &Stack{
+ storage: make([]uintptr, 64),
+ }
+})
+
+// Stack is a captured stack trace.
+type Stack struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+ // it will be one if only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
+
+const (
+ // First captures only the first frame.
+ First Depth = iota
+
+ // Full captures the entire call stack, allocating more
+ // storage for it if needed.
+ Full
+)
+
+// Capture captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// Capture.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func Capture(skip int, depth Depth) *Stack {
+ stack := _stackPool.Get()
+
+ switch depth {
+ case First:
+ stack.pcs = stack.storage[:1]
+ case Full:
+ stack.pcs = stack.storage
+ }
+
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+ // itself. +2 to skip Capture and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
+
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == Full {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
+ }
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
+ }
+
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *Stack) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stackPool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *Stack) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+ stack := Capture(skip+1, Full)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := NewFormatter(buffer)
+ stackfmt.FormatStack(stack)
+ return buffer.String()
+}
+
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
+ b *buffer.Buffer
+ nonEmpty bool // whether we've written at least one frame already
+}
+
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+ return Formatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *Formatter) FormatStack(stack *Stack) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
+}
+
+// FormatFrame formats the given frame.
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
+}
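
For illustration only (not in the patch): the single-frame (First) mode is what logger.check below uses to record the caller. A rough sketch from inside the zap module; fmt is the only extra import and the helper name is made up.

    // caller reports the file:line of the function that called it.
    func caller() string {
        // skip=1 skips caller itself, so the captured frame is its caller.
        stack := stacktrace.Capture(1, stacktrace.First)
        defer stack.Free()

        frame, _ := stack.Next()
        return fmt.Sprintf("%s:%d", frame.File, frame.Line)
    }

For a full, formatted trace, stacktrace.Take(0) effectively replaces the takeStacktrace helper deleted further down in this patch.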
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
index 3567a9a..155b208 100644
--- a/vendor/go.uber.org/zap/level.go
+++ b/vendor/go.uber.org/zap/level.go
@@ -21,7 +21,9 @@
package zap
import (
- "go.uber.org/atomic"
+ "sync/atomic"
+
+ "go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@@ -70,12 +72,14 @@
l *atomic.Int32
}
+var _ internal.LeveledEnabler = AtomicLevel{}
+
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
- return AtomicLevel{
- l: atomic.NewInt32(int32(InfoLevel)),
- }
+ lvl := AtomicLevel{l: new(atomic.Int32)}
+ lvl.l.Store(int32(InfoLevel))
+ return lvl
}
// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
@@ -86,6 +90,23 @@
return a
}
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
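
For illustration only (not in the patch): ParseAtomicLevel lets text configuration (a flag, an environment variable) feed a Config directly; the helper below is a sketch with a made-up name.

    // newLogger builds a production logger whose level comes from text
    // such as "debug" or "INFO".
    func newLogger(levelText string) (*zap.Logger, error) {
        lvl, err := zap.ParseAtomicLevel(levelText)
        if err != nil {
            return nil, err
        }

        cfg := zap.NewProductionConfig()
        cfg.Level = lvl // the level can still be changed at runtime
        return cfg.Build()
    }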
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
index f116bd9..c4d3003 100644
--- a/vendor/go.uber.org/zap/logger.go
+++ b/vendor/go.uber.org/zap/logger.go
@@ -22,11 +22,12 @@
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
- "runtime"
"strings"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -42,7 +43,8 @@
development bool
addCaller bool
- onFatal zapcore.CheckWriteAction // default is WriteThenFatal
+ onPanic zapcore.CheckWriteHook // default is WriteThenPanic
+ onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
errorOutput zapcore.WriteSyncer
@@ -85,7 +87,7 @@
func NewNop() *Logger {
return &Logger{
core: zapcore.NewNopCore(),
- errorOutput: zapcore.AddSync(ioutil.Discard),
+ errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock,
}
@@ -107,6 +109,19 @@
return NewDevelopmentConfig().Build(options...)
}
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+// var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+ if err != nil {
+ panic(err)
+ }
+
+ return logger
+}
+
// NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output
@@ -160,7 +175,8 @@
}
// With creates a child logger and adds structured context to it. Fields added
-// to the child don't affect the parent, and vice versa.
+// to the child don't affect the parent, and vice versa. Any fields that
+// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 {
return log
@@ -170,6 +186,35 @@
return l
}
+// WithLazy creates a child logger and adds structured context to it lazily.
+//
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// WithLazy provides a worthwhile performance optimization for contextual loggers
+// when the likelihood of using the child logger is low,
+// such as error paths and rarely taken branches.
+//
+// Similar to [With], fields added to the child don't affect the parent, and vice versa.
+func (log *Logger) WithLazy(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
+ return zapcore.NewLazyWith(core, fields)
+ }))
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
@@ -177,6 +222,16 @@
return log.check(lvl, msg)
}
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+// Any Fields that require evaluation (such as Objects) are evaluated upon
+// invocation of Log.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+ if ce := log.check(lvl, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
@@ -253,14 +308,22 @@
return log.core
}
+// Name returns the Logger's underlying name,
+// or an empty string if the logger is unnamed.
+func (log *Logger) Name() string {
+ return log.name
+}
+
func (log *Logger) clone() *Logger {
- copy := *log
- return &copy
+ clone := *log
+ return &clone
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- // check must always be called directly by a method in the Logger interface
- // (e.g., Check, Info, Fatal).
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@@ -283,18 +346,12 @@
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
case zapcore.FatalLevel:
- onFatal := log.onFatal
- // Noop is the default value for CheckWriteAction, and it leads to
- // continued execution after a Fatal which is unexpected.
- if onFatal == zapcore.WriteThenNoop {
- onFatal = zapcore.WriteThenFatal
- }
- ce = ce.Should(ent, onFatal)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
case zapcore.DPanicLevel:
if log.development {
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
}
}
@@ -307,42 +364,72 @@
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
- if log.addCaller {
- frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
- if !defined {
- fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
- log.errorOutput.Sync()
- }
- ce.Entry.Caller = zapcore.EntryCaller{
- Defined: defined,
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktrace.First
+ if addStack {
+ stackDepth = stacktrace.Full
+ }
+ stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+ _ = log.errorOutput.Sync()
+ }
+ return ce
+ }
+
+ frame, more := stack.Next()
+
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
- if log.addStack.Enabled(ce.Entry.Level) {
- ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := stacktrace.NewFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
}
return ce
}
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
- const skipOffset = 2 // skip getCallerFrame and Callers
-
- pc := make([]uintptr, 1)
- numFrames := runtime.Callers(skip+skipOffset, pc)
- if numFrames < 1 {
- return
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+ // A nil or WriteThenNoop hook will lead to continued execution after
+ // a Panic or Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the log.Fatal.
+ if override == nil || override == zapcore.WriteThenNoop {
+ return defaultHook
}
-
- frame, _ = runtime.CallersFrames(pc).Next()
- return frame, frame.PC != 0
+ return override
}
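
For illustration only (not in the patch): a sketch contrasting With and the new WithLazy on a rarely used child logger; process, req, and id are hypothetical.

    // With evaluates its fields immediately. WithLazy defers evaluating them
    // until the child logger is written to (or chained with With), so it may
    // retain references to the field values until then.
    reqLog := logger.WithLazy(zap.String("request_id", id))

    if err := process(req); err != nil {
        // Only now are the lazy fields applied.
        reqLog.Error("processing failed", zap.Error(err))
    }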
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
index e9e6616..43d357a 100644
--- a/vendor/go.uber.org/zap/options.go
+++ b/vendor/go.uber.org/zap/options.go
@@ -132,10 +132,44 @@
})
}
-// OnFatal sets the action to take on fatal logs.
-func OnFatal(action zapcore.CheckWriteAction) Option {
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
- log.onFatal = action
+ log.onPanic = hook
+ })
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+ return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onFatal = hook
})
}
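
For illustration only (not in the patch): a sketch of the WithFatalHook usage mentioned in the doc comment, observing a Fatal log in a test without exiting the process; it assumes the zaptest/observer package and the usual zap/zapcore imports.

    // WriteThenGoexit ends only the logging goroutine, so the test survives.
    core, logs := observer.New(zapcore.InfoLevel)
    logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))

    done := make(chan struct{})
    go func() {
        defer close(done)
        logger.Fatal("boom") // writes the entry, then runtime.Goexit
    }()
    <-done

    fmt.Println(logs.Len()) // 1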
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
index df46fa8..499772a 100644
--- a/vendor/go.uber.org/zap/sink.go
+++ b/vendor/go.uber.org/zap/sink.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@
"io"
"net/url"
"os"
+ "path/filepath"
"strings"
"sync"
@@ -34,23 +35,7 @@
const schemeFile = "file"
-var (
- _sinkMutex sync.RWMutex
- _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
-)
-
-func init() {
- resetSinkRegistry()
-}
-
-func resetSinkRegistry() {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
-
- _sinkFactories = map[string]func(*url.URL) (Sink, error){
- schemeFile: newFileSink,
- }
-}
+var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@@ -58,10 +43,6 @@
io.Closer
}
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
type errSinkNotFound struct {
scheme string
}
@@ -70,16 +51,30 @@
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ // Infallible operation: the registry is empty, so we can't have a conflict.
+ _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specified scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@@ -88,14 +83,22 @@
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
- if _, ok := _sinkFactories[normalized]; ok {
+ if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
- _sinkFactories[normalized] = factory
+ sr.factories[normalized] = factory
return nil
}
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +107,27 @@
u.Scheme = schemeFile
}
- _sinkMutex.RLock()
- factory, ok := _sinkFactories[u.Scheme]
- _sinkMutex.RUnlock()
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@@ -130,13 +144,18 @@
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
- switch u.Path {
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
- return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
}
func normalizeScheme(s string) (string, error) {
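
For illustration only (not in the patch): the registry keeps the public RegisterSink/Open API unchanged, and absolute paths now bypass URL parsing entirely; a sketch with a hypothetical log path.

    // "stdout", "stderr", relative paths, and absolute paths (including
    // Windows drive paths) all resolve through the file sink; other schemes
    // must be registered with zap.RegisterSink first.
    ws, closeAll, err := zap.Open("stdout", "/var/log/app.log")
    if err != nil {
        panic(err)
    }
    defer closeAll()

    enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
    logger := zap.New(zapcore.NewCore(enc, ws, zap.InfoLevel))
    logger.Info("sinks wired up")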
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
deleted file mode 100644
index 0cf8c1d..0000000
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "runtime"
- "sync"
-
- "go.uber.org/zap/internal/bufferpool"
-)
-
-var (
- _stacktracePool = sync.Pool{
- New: func() interface{} {
- return newProgramCounters(64)
- },
- }
-)
-
-func takeStacktrace(skip int) string {
- buffer := bufferpool.Get()
- defer buffer.Free()
- programCounters := _stacktracePool.Get().(*programCounters)
- defer _stacktracePool.Put(programCounters)
-
- var numFrames int
- for {
- // Skip the call to runtime.Callers and takeStacktrace so that the
- // program counters start at the caller of takeStacktrace.
- numFrames = runtime.Callers(skip+2, programCounters.pcs)
- if numFrames < len(programCounters.pcs) {
- break
- }
- // Don't put the too-short counter slice back into the pool; this lets
- // the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2)
- }
-
- i := 0
- frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
-
- // Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a a runtime frame which
- // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := frames.Next(); more; frame, more = frames.Next() {
- if i != 0 {
- buffer.AppendByte('\n')
- }
- i++
- buffer.AppendString(frame.Function)
- buffer.AppendByte('\n')
- buffer.AppendByte('\t')
- buffer.AppendString(frame.File)
- buffer.AppendByte(':')
- buffer.AppendInt(int64(frame.Line))
- }
-
- return buffer.String()
-}
-
-type programCounters struct {
- pcs []uintptr
-}
-
-func newProgramCounters(size int) *programCounters {
- return &programCounters{make([]uintptr, size)}
-}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
index 0b96519..8904cd0 100644
--- a/vendor/go.uber.org/zap/sugar.go
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -31,6 +31,7 @@
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@@ -38,10 +39,19 @@
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
-// For each log level, it exposes three methods: one for loosely-typed
-// structured logging, one for println-style formatting, and one for
-// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
-// output with Infow ("info with" structured context), Info, or Infof.
+// For each log level, it exposes four methods:
+//
+// - methods named after the log level for log.Print-style logging
+// - methods ending in "w" for loosely-typed structured logging
+// - methods ending in "f" for log.Printf-style logging
+// - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+// Info(...any) Print-style logging
+// Infow(...any) Structured logging (read as "info with")
+// Infof(string, ...any) Printf-style logging
+// Infoln(...any) Println-style logging
type SugaredLogger struct {
base *Logger
}
@@ -61,27 +71,40 @@
return &SugaredLogger{base: s.base.Named(name)}
}
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+ base := s.base.clone()
+ for _, opt := range opts {
+ opt.apply(base)
+ }
+ return &SugaredLogger{base: base}
+}
+
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
-// sugaredLogger.With(
-// "hello", "world",
-// "failure", errors.New("oh no"),
-// Stack(),
-// "count", 42,
-// "user", User{Name: "alice"},
-// )
+//
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+//
// is the equivalent of
-// unsugared.With(
-// String("hello", "world"),
-// String("failure", "oh no"),
-// Stack(),
-// Int("count", 42),
-// Object("user", User{Name: "alice"}),
-// )
+//
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
@@ -92,83 +115,138 @@
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
-// Debug uses fmt.Sprint to construct and log a message.
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
+// Log logs the provided arguments at the provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+ s.log(lvl, "", args, nil)
+}
+
+// Debug logs the provided arguments at [DebugLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
}
-// Info uses fmt.Sprint to construct and log a message.
+// Info logs the provided arguments at [InfoLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Info(args ...interface{}) {
s.log(InfoLevel, "", args, nil)
}
-// Warn uses fmt.Sprint to construct and log a message.
+// Warn logs the provided arguments at [WarnLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Warn(args ...interface{}) {
s.log(WarnLevel, "", args, nil)
}
-// Error uses fmt.Sprint to construct and log a message.
+// Error logs the provided arguments at [ErrorLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Error(args ...interface{}) {
s.log(ErrorLevel, "", args, nil)
}
-// DPanic uses fmt.Sprint to construct and log a message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanic logs the provided arguments at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) DPanic(args ...interface{}) {
s.log(DPanicLevel, "", args, nil)
}
-// Panic uses fmt.Sprint to construct and log a message, then panics.
+// Panic constructs a message with the provided arguments and panics.
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Panic(args ...interface{}) {
s.log(PanicLevel, "", args, nil)
}
-// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+// Fatal constructs a message with the provided arguments and calls os.Exit.
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
-// Debugf uses fmt.Sprintf to log a templated message.
+// Logf formats the message according to the format specifier
+// and logs it at the provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+ s.log(lvl, template, args, nil)
+}
+
+// Debugf formats the message according to the format specifier
+// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
s.log(DebugLevel, template, args, nil)
}
-// Infof uses fmt.Sprintf to log a templated message.
+// Infof formats the message according to the format specifier
+// and logs it at [InfoLevel].
func (s *SugaredLogger) Infof(template string, args ...interface{}) {
s.log(InfoLevel, template, args, nil)
}
-// Warnf uses fmt.Sprintf to log a templated message.
+// Warnf formats the message according to the format specifier
+// and logs it at [WarnLevel].
func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
s.log(WarnLevel, template, args, nil)
}
-// Errorf uses fmt.Sprintf to log a templated message.
+// Errorf formats the message according to the format specifier
+// and logs it at [ErrorLevel].
func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
s.log(ErrorLevel, template, args, nil)
}
-// DPanicf uses fmt.Sprintf to log a templated message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanicf formats the message according to the format specifier
+// and logs it at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
s.log(DPanicLevel, template, args, nil)
}
-// Panicf uses fmt.Sprintf to log a templated message, then panics.
+// Panicf formats the message according to the format specifier
+// and panics.
func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
s.log(PanicLevel, template, args, nil)
}
-// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+// Fatalf formats the message according to the format specifier
+// and calls os.Exit.
func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+ s.log(lvl, msg, nil, keysAndValues)
+}
+
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
-// s.With(keysAndValues).Debug(msg)
+//
+// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues)
}
@@ -210,11 +288,61 @@
s.log(FatalLevel, msg, nil, keysAndValues)
}
+// Logln logs a message at the provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+ s.logln(lvl, args, nil)
+}
+
+// Debugln logs a message at [DebugLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+ s.logln(DebugLevel, args, nil)
+}
+
+// Infoln logs a message at [InfoLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+ s.logln(InfoLevel, args, nil)
+}
+
+// Warnln logs a message at [WarnLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+ s.logln(WarnLevel, args, nil)
+}
+
+// Errorln logs a message at [ErrorLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+ s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln logs a message at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+ s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+ s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+ s.logln(FatalLevel, args, nil)
+}
+
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
return s.base.Sync()
}
+// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of
// string formatting.
@@ -228,6 +356,18 @@
}
}
+// logln message with Sprintln
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessageln(fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
// getMessage format with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 {
@@ -246,15 +386,24 @@
return fmt.Sprint(fmtArgs...)
}
+// getMessageln format with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+ msg := fmt.Sprintln(fmtArgs...)
+ return msg[:len(msg)-1]
+}
+
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 {
return nil
}
- // Allocate enough space for the worst case; if users pass only structured
- // fields, we shouldn't penalize them with extra allocations.
- fields := make([]Field, 0, len(args))
- var invalid invalidPairs
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@@ -264,6 +413,18 @@
continue
}
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
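
For illustration only (not in the patch): a sketch of the sugared API after this change, showing the new ...ln variants and the bare-error handling added to sweetenFields; logger is an existing *zap.Logger and the messages are made up.

    sugar := logger.Sugar()

    sugar.Infow("user logged in", "user", "alice", "attempt", 3) // structured ("info with")
    sugar.Infof("retrying in %ds", 5)                            // printf-style
    sugar.Infoln("shutting", "down")                             // println-style: "shutting down"

    // A bare error in the key-value list becomes the standard "error" field;
    // a second bare error is logged separately under _multipleErrMsg.
    sugar.Errorw("upload failed", errors.New("disk full"), "bucket", "media")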
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
index 86a709a..06768c6 100644
--- a/vendor/go.uber.org/zap/writer.go
+++ b/vendor/go.uber.org/zap/writer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,6 @@
import (
"fmt"
"io"
- "io/ioutil"
"go.uber.org/zap/zapcore"
@@ -49,40 +48,40 @@
// os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
- writers, close, err := open(paths)
+ writers, closeAll, err := open(paths)
if err != nil {
return nil, nil, err
}
writer := CombineWriteSyncers(writers...)
- return writer, close, nil
+ return writer, closeAll, nil
}
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths))
- close := func() {
+ closeAll := func() {
for _, c := range closers {
- c.Close()
+ _ = c.Close()
}
}
var openErr error
for _, path := range paths {
- sink, err := newSink(path)
+ sink, err := _sinkRegistry.newSink(path)
if err != nil {
- openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
closers = append(closers, sink)
}
if openErr != nil {
- close()
- return writers, nil, openErr
+ closeAll()
+ return nil, nil, openErr
}
- return writers, close, nil
+ return writers, closeAll, nil
}
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
@@ -93,7 +92,7 @@
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 {
- return zapcore.AddSync(ioutil.Discard)
+ return zapcore.AddSync(io.Discard)
}
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
index 0c1436f..a40e93b 100644
--- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -43,6 +43,37 @@
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+// func main() {
+// ws := ... // your log destination
+// bws := &zapcore.BufferedWriteSyncer{WS: ws}
+// defer bws.Stop()
+//
+// // ...
+// core := zapcore.NewCore(enc, bws, lvl)
+// logger := zap.New(core)
+//
+// // ...
+// }
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+// ws := &BufferedWriteSyncer{
+// WS: os.Stderr,
+// Size: 512 * 1024, // 512 kB
+// FlushInterval: time.Minute,
+// }
+// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.
@@ -71,10 +102,10 @@
// unexported fields for state
mu sync.Mutex
initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
writer *bufio.Writer
ticker *time.Ticker
stop chan struct{} // closed when flushLoop should stop
- stopped bool // whether Stop() has run
done chan struct{} // closed when flushLoop has stopped
}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
index d2ea95b..422fd82 100644
--- a/vendor/go.uber.org/zap/zapcore/clock.go
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -20,9 +20,7 @@
package zapcore
-import (
- "time"
-)
+import "time"
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af4..cc2b4e0 100644
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -22,20 +22,20 @@
import (
"fmt"
- "sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
)
-var _sliceEncoderPool = sync.Pool{
- New: func() interface{} {
- return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
- },
-}
+var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
+ return &sliceArrayEncoder{
+ elems: make([]interface{}, 0, 2),
+ }
+})
func getSliceEncoder() *sliceArrayEncoder {
- return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+ return _sliceEncoderPool.Get()
}
func putSliceEncoder(e *sliceArrayEncoder) {
@@ -77,7 +77,7 @@
// If this ever becomes a performance bottleneck, we can implement
// ArrayEncoder for our plain-text format.
arr := getSliceEncoder()
- if c.TimeKey != "" && c.EncodeTime != nil {
+ if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
c.EncodeTime(ent.Time, arr)
}
if c.LevelKey != "" && c.EncodeLevel != nil {
@@ -125,11 +125,7 @@
line.AppendString(ent.Stack)
}
- if c.LineEnding != "" {
- line.AppendString(c.LineEnding)
- } else {
- line.AppendString(DefaultLineEnding)
- }
+ line.AppendString(c.LineEnding)
return line, nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
index a1ef8b0..776e93f 100644
--- a/vendor/go.uber.org/zap/zapcore/core.go
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -69,6 +69,15 @@
out WriteSyncer
}
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
@@ -93,9 +102,9 @@
return err
}
if ent.Level > ErrorLevel {
- // Since we may be crashing the program, sync the output. Ignore Sync
- // errors, pending a clean solution to issue #370.
- c.Sync()
+ // Since we may be crashing the program, sync the output.
+ // Ignore Sync errors, pending a clean solution to issue #370.
+ _ = c.Sync()
}
return nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca1..0446254 100644
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@
import (
"encoding/json"
+ "io"
"time"
"go.uber.org/zap/buffer"
@@ -36,6 +37,9 @@
const OmitKey = ""
// A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type LevelEncoder func(Level, PrimitiveArrayEncoder)
// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -89,6 +93,9 @@
}
// A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -187,10 +194,13 @@
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
-// timeEncoder:
-// layout: 06/01/02 03:04pm
+//
+// timeEncoder:
+// layout: 06/01/02 03:04pm
+//
// If value is string, it uses UnmarshalText.
-// timeEncoder: iso8601
+//
+// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`
@@ -215,6 +225,9 @@
}
// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -258,6 +271,9 @@
}
// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -288,6 +304,9 @@
// A NameEncoder serializes a period-separated logger name to a primitive
// type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type NameEncoder func(string, PrimitiveArrayEncoder)
// FullNameEncoder serializes the logger name as-is.
@@ -312,14 +331,15 @@
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +350,9 @@
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+ // If not provided, objects are encoded using json.Encoder
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
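
A short sketch of the new SkipLineEnding field; the production encoder config and the direct EncodeEntry call are illustrative choices:

    package main

    import (
        "fmt"
        "time"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        cfg := zap.NewProductionEncoderConfig()
        cfg.SkipLineEnding = true // omit the trailing line ending entirely

        enc := zapcore.NewJSONEncoder(cfg)
        buf, err := enc.EncodeEntry(zapcore.Entry{
            Level:   zapcore.InfoLevel,
            Time:    time.Now(),
            Message: "no line ending appended",
        }, nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", buf.String()) // the encoded entry has no trailing "\n"
        buf.Free()
    }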
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index 2d815fe..459a5d7 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -24,26 +24,23 @@
"fmt"
"runtime"
"strings"
- "sync"
"time"
+ "go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
-
- "go.uber.org/multierr"
+ "go.uber.org/zap/internal/pool"
)
-var (
- _cePool = sync.Pool{New: func() interface{} {
- // Pre-allocate some space for cores.
- return &CheckedEntry{
- cores: make([]Core, 4),
- }
- }}
-)
+var _cePool = pool.New(func() *CheckedEntry {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+})
func getCheckedEntry() *CheckedEntry {
- ce := _cePool.Get().(*CheckedEntry)
+ ce := _cePool.Get()
ce.reset()
return ce
}
@@ -152,6 +149,27 @@
Stack string
}
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+// if ce := logger.Check(...); ce != nil {
+// ce = ce.After(hook)
+// ce.Write(...)
+// }
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+ // OnWrite is invoked with the CheckedEntry that was written and a list
+ // of fields added with that entry.
+ //
+ // The list of fields DOES NOT include fields that were already added
+ // to the logger with the With method.
+ OnWrite(*CheckedEntry, []Field)
+}
+
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@@ -164,21 +182,36 @@
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
- // WriteThenFatal causes a fatal os.Exit after Write.
+ // WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
+// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
+// with the new CheckWriteHook interface, which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+ switch a {
+ case WriteThenGoexit:
+ runtime.Goexit()
+ case WriteThenPanic:
+ panic(ce.Message)
+ case WriteThenFatal:
+ exit.With(1)
+ }
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
+
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
-// CheckedEntry references should be created by calling AddCore or Should on a
+// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
- should CheckWriteAction
+ after CheckWriteHook
cores []Core
}
@@ -186,7 +219,7 @@
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
- ce.should = WriteThenNoop
+ ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@@ -209,7 +242,7 @@
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
- ce.ErrorOutput.Sync()
+ _ = ce.ErrorOutput.Sync() // ignore error
}
return
}
@@ -219,24 +252,16 @@
for i := range ce.cores {
err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
}
- if ce.ErrorOutput != nil {
- if err != nil {
- fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
- ce.ErrorOutput.Sync()
- }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ _ = ce.ErrorOutput.Sync() // ignore error
}
- should, msg := ce.should, ce.Message
+ hook := ce.after
+ if hook != nil {
+ hook.OnWrite(ce, fields)
+ }
putCheckedEntry(ce)
-
- switch should {
- case WriteThenPanic:
- panic(msg)
- case WriteThenFatal:
- exit.Exit()
- case WriteThenGoexit:
- runtime.Goexit()
- }
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@@ -254,11 +279,20 @@
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ return ce.After(ent, should)
+}
+
+// After sets this CheckedEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
- ce.should = should
+ ce.after = hook
return ce
}
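
To illustrate the new CheckWriteHook interface, a minimal sketch of a custom hook installed through the zap.WithFatalHook option; the flushHook type and its behavior are hypothetical:

    package main

    import (
        "fmt"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    // flushHook is a hypothetical hook that records fatal writes instead of
    // letting the default WriteThenFatal action exit the process.
    type flushHook struct{ fired *bool }

    func (h flushHook) OnWrite(_ *zapcore.CheckedEntry, _ []zapcore.Field) {
        *h.fired = true
    }

    func main() {
        var fired bool
        logger := zap.NewExample(zap.WithFatalHook(flushHook{fired: &fired}))

        logger.Fatal("handled by the custom hook, so the process keeps running")
        fmt.Println("hook fired:", fired)
    }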
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
index f2a07d7..c40df13 100644
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -23,7 +23,8 @@
import (
"fmt"
"reflect"
- "sync"
+
+ "go.uber.org/zap/internal/pool"
)
// Encodes the given error into fields of an object. A field with the given
@@ -36,13 +37,13 @@
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
-// {
-// "error": err.Error(),
-// "errorVerbose": fmt.Sprintf("%+v", err),
-// "errorCauses": [
-// ...
-// ],
-// }
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method
@@ -83,7 +84,7 @@
Errors() []error
}
-// Note that errArry and errArrayElem are very similar to the version
+// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.
@@ -97,15 +98,18 @@
}
el := newErrArrayElem(errs[i])
- arr.AppendObject(el)
+ err := arr.AppendObject(el)
el.Free()
+ if err != nil {
+ return err
+ }
}
return nil
}
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
-}}
+})
// Encodes any error into a {"error": ...} re-using the same errors logic.
//
@@ -113,7 +117,7 @@
type errArrayElem struct{ err error }
func newErrArrayElem(err error) *errArrayElem {
- e := _errArrayElemPool.Get().(*errArrayElem)
+ e := _errArrayElemPool.Get()
e.err = err
return e
}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
index 95bdb0a..308c978 100644
--- a/vendor/go.uber.org/zap/zapcore/field.go
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -47,7 +47,7 @@
ByteStringType
// Complex128Type indicates that the field carries a complex128.
Complex128Type
- // Complex64Type indicates that the field carries a complex128.
+ // Complex64Type indicates that the field carries a complex64.
Complex64Type
// DurationType indicates that the field carries a time.Duration.
DurationType
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
index 5db4afb..198def9 100644
--- a/vendor/go.uber.org/zap/zapcore/hook.go
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -27,6 +27,11 @@
funcs []func(Entry) error
}
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@@ -40,6 +45,10 @@
}
}
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
index 5a17492..7a11237 100644
--- a/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -27,6 +27,11 @@
level LevelEnabler
}
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@@ -45,6 +50,10 @@
return c.level.Enabled(lvl)
}
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}
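
A small sketch of the level-filter core in use via the existing zap.IncreaseLevel option (which wraps NewIncreaseLevelCore); the example logger is an arbitrary choice:

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        logger := zap.NewExample()

        // Raise the effective level to Warn; Info entries are filtered out
        // before they reach the wrapped core.
        warnOnly := logger.WithOptions(zap.IncreaseLevel(zapcore.WarnLevel))

        warnOnly.Info("dropped by the level filter")
        warnOnly.Warn("still logged")
    }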
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d91..9685169 100644
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,26 +22,21 @@
import (
"encoding/base64"
- "encoding/json"
"math"
- "sync"
"time"
"unicode/utf8"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"
-var _jsonPool = sync.Pool{New: func() interface{} {
+var _jsonPool = pool.New(func() *jsonEncoder {
return &jsonEncoder{}
-}}
-
-func getJSONEncoder() *jsonEncoder {
- return _jsonPool.Get().(*jsonEncoder)
-}
+})
func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil {
@@ -64,7 +59,7 @@
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
- reflectEnc *json.Encoder
+ reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -72,7 +67,9 @@
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
-// {"foo":"bar","foo":"baz"}
+//
+// {"foo":"bar","foo":"baz"}
+//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@@ -82,6 +79,17 @@
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@@ -118,6 +126,11 @@
enc.AppendComplex128(val)
}
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@@ -128,6 +141,11 @@
enc.AppendFloat64(val)
}
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
@@ -136,10 +154,7 @@
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
- // For consistency with our custom JSON encoder.
- enc.reflectEnc.SetEscapeHTML(false)
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@@ -201,10 +216,16 @@
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
return err
}
@@ -220,16 +241,23 @@
enc.buf.AppendByte('"')
}
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, 64)
- enc.buf.AppendByte('+')
- enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendFloat(r, precision)
+ // If the imaginary part is negative, AppendFloat already emits the
+ // minus (-) sign, so only prepend '+' for non-negative values.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@@ -292,29 +320,28 @@
enc.buf.AppendUint(val)
}
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@@ -323,7 +350,7 @@
}
func (enc *jsonEncoder) clone() *jsonEncoder {
- clone := getJSONEncoder()
+ clone := _jsonPool.Get()
clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces
@@ -335,7 +362,7 @@
final := enc.clone()
final.buf.AppendByte('{')
- if final.LevelKey != "" {
+ if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@@ -345,7 +372,7 @@
final.AppendString(ent.Level.String())
}
}
- if final.TimeKey != "" {
+ if final.TimeKey != "" && !ent.Time.IsZero() {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {
@@ -396,11 +423,7 @@
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
- if final.LineEnding != "" {
- final.buf.AppendString(final.LineEnding)
- } else {
- final.buf.AppendString(DefaultLineEnding)
- }
+ final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@@ -415,6 +438,7 @@
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
+ enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {
@@ -462,73 +486,98 @@
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
- for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.AppendString(s[i : i+size])
- i += size
- }
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendString,
+ utf8.DecodeRuneInString,
+ enc.buf,
+ s,
+ )
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendBytes,
+ utf8.DecodeRune,
+ enc.buf,
+ s,
+ )
+}
+
+// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
+// It appends a string or byte slice to the buffer, escaping all special characters.
+func safeAppendStringLike[S []byte | string](
+ // appendTo appends this string-like object to the buffer.
+ appendTo func(*buffer.Buffer, S),
+ // decodeRune decodes the next rune from the string-like object
+ // and returns its value and width in bytes.
+ decodeRune func(S) (rune, int),
+ buf *buffer.Buffer,
+ s S,
+) {
+ // The encoding logic below works by skipping over characters
+ // that can be safely copied as-is,
+ // until a character is found that needs special handling.
+ // At that point, we copy everything we've seen so far,
+ // and then handle that special character.
+ //
+ // last is the index of the last byte that was copied to the buffer.
+ last := 0
for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRune(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.Write(s[i : i+size])
- i += size
- }
-}
+ if s[i] >= utf8.RuneSelf {
+ // Character >= RuneSelf may be part of a multi-byte rune.
+ // They need to be decoded before we can decide how to handle them.
+ r, size := decodeRune(s[i:])
+ if r != utf8.RuneError || size != 1 {
+ // No special handling required.
+ // Skip over this rune and continue.
+ i += size
+ continue
+ }
-// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
- if b >= utf8.RuneSelf {
- return false
- }
- if 0x20 <= b && b != '\\' && b != '"' {
- enc.buf.AppendByte(b)
- return true
- }
- switch b {
- case '\\', '"':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte(b)
- case '\n':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('n')
- case '\r':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('r')
- case '\t':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('t')
- default:
- // Encode bytes < 0x20, except for the escape sequences above.
- enc.buf.AppendString(`\u00`)
- enc.buf.AppendByte(_hex[b>>4])
- enc.buf.AppendByte(_hex[b&0xF])
- }
- return true
-}
+ // Invalid UTF-8 sequence.
+ // Replace it with the Unicode replacement character.
+ appendTo(buf, s[last:i])
+ buf.AppendString(`\ufffd`)
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
- if r == utf8.RuneError && size == 1 {
- enc.buf.AppendString(`\ufffd`)
- return true
+ i++
+ last = i
+ } else {
+ // Character < RuneSelf is a single-byte UTF-8 rune.
+ if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
+ // No escaping necessary.
+ // Skip over this character and continue.
+ i++
+ continue
+ }
+
+ // This character needs to be escaped.
+ appendTo(buf, s[last:i])
+ switch s[i] {
+ case '\\', '"':
+ buf.AppendByte('\\')
+ buf.AppendByte(s[i])
+ case '\n':
+ buf.AppendByte('\\')
+ buf.AppendByte('n')
+ case '\r':
+ buf.AppendByte('\\')
+ buf.AppendByte('r')
+ case '\t':
+ buf.AppendByte('\\')
+ buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ buf.AppendString(`\u00`)
+ buf.AppendByte(_hex[s[i]>>4])
+ buf.AppendByte(_hex[s[i]&0xF])
+ }
+
+ i++
+ last = i
+ }
}
- return false
+
+ // add remaining
+ appendTo(buf, s[last:])
}
diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go
new file mode 100644
index 0000000..05288d6
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+ Core
+ sync.Once
+ fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+ return &lazyWithCore{
+ Core: core,
+ fields: fields,
+ }
+}
+
+func (d *lazyWithCore) initOnce() {
+ d.Once.Do(func() {
+ d.Core = d.Core.With(d.fields)
+ })
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+ d.initOnce()
+ return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+ d.initOnce()
+ return d.Core.Check(e, ce)
+}
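
A minimal sketch of the newly exported NewLazyWith; the encoder, sink, and field are placeholders. Note that the field values themselves are still built eagerly — only the wrapped core's With call (and thus the field encoding) is deferred until the core is first used:

    package main

    import (
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        base := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            zapcore.AddSync(os.Stdout),
            zapcore.InfoLevel,
        )

        // base.With (and the encoding it implies) runs only once the lazy
        // core is actually checked or chained further.
        lazy := zapcore.NewLazyWith(base, []zapcore.Field{
            zap.String("component", "ingest"),
        })

        zap.New(lazy).Info("first write triggers the deferred With")
    }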
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f..e01a241 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,8 +53,62 @@
_minLevel = DebugLevel
_maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
)
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
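
A short sketch of the new ParseLevel and LevelOf helpers; the JSON encoder and stderr sink are arbitrary:

    package main

    import (
        "fmt"
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        lvl, err := zapcore.ParseLevel("warn")
        if err != nil {
            panic(err)
        }

        core := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            zapcore.AddSync(os.Stderr),
            lvl,
        )

        // LevelOf reports the minimum level the core has enabled.
        fmt.Println(zapcore.LevelOf(core)) // warn
    }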
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
similarity index 64%
copy from vendor/go.uber.org/zap/global_prego112.go
copy to vendor/go.uber.org/zap/zapcore/reflected_encoder.go
index d3ab9af..8746360 100644
--- a/vendor/go.uber.org/zap/global_prego112.go
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,24 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build !go1.12
+package zapcore
-package zap
+import (
+ "encoding/json"
+ "io"
+)
-const _stdLogDefaultDepth = 2
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
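
For illustration, a hedged sketch that swaps in a different reflected encoder through EncoderConfig.NewReflectedEncoder; here a plain encoding/json encoder with HTML escaping left enabled, purely as an example of an alternative:

    package main

    import (
        "encoding/json"
        "io"
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        cfg := zap.NewProductionEncoderConfig()
        cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
            // Unlike defaultReflectedEncoder, keep HTML escaping enabled.
            return json.NewEncoder(w)
        }

        core := zapcore.NewCore(
            zapcore.NewJSONEncoder(cfg),
            zapcore.AddSync(os.Stdout),
            zapcore.InfoLevel,
        )

        // zap.Any falls back to a Reflect field for this map, so the custom
        // reflected encoder is used to serialize it.
        zap.New(core).Info("reflected", zap.Any("payload", map[string]string{"html": "<b>&</b>"}))
    }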
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index 25f10ca..b7c093a 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -21,9 +21,8 @@
package zapcore
import (
+ "sync/atomic"
"time"
-
- "go.uber.org/atomic"
)
const (
@@ -66,16 +65,16 @@
tn := t.UnixNano()
resetAfter := c.resetAt.Load()
if resetAfter > tn {
- return c.counter.Inc()
+ return c.counter.Add(1)
}
c.counter.Store(1)
newResetAfter := tn + tick.Nanoseconds()
- if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
// We raced with another goroutine trying to reset, and it also reset
// the counter to 1, so we need to reincrement the counter.
- return c.counter.Inc()
+ return c.counter.Add(1)
}
return 1
@@ -113,12 +112,12 @@
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
-// var dropped atomic.Int64
-// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
-// if dec&zapcore.LogDropped > 0 {
-// dropped.Inc()
-// }
-// })
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@@ -133,10 +132,21 @@
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -164,6 +174,11 @@
hook func(Entry, SamplingDecision)
}
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@@ -181,6 +196,10 @@
return NewSamplerWithOptions(core, tick, first, thereafter)
}
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),
@@ -197,12 +216,14 @@
return ce
}
- counter := s.counts.get(ent.Level, ent.Message)
- n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (n-s.first)%s.thereafter != 0 {
- s.hook(ent, LogDropped)
- return ce
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
}
- s.hook(ent, LogSampled)
return s.Core.Check(ent, ce)
}
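
A sketch combining NewSamplerWithOptions with a SamplerHook to count dropped entries, mirroring the pattern in the doc comment above; the 10/5 parameters and the atomic counter are illustrative:

    package main

    import (
        "fmt"
        "os"
        "sync/atomic"
        "time"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        var dropped atomic.Int64

        base := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            zapcore.AddSync(os.Stderr),
            zapcore.InfoLevel,
        )

        // Keep the first 10 identical entries per second, then every 5th.
        sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5,
            zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
                if dec&zapcore.LogDropped > 0 {
                    dropped.Add(1)
                }
            }),
        )

        logger := zap.New(sampled)
        for i := 0; i < 100; i++ {
            logger.Info("hot loop")
        }
        fmt.Println("dropped:", dropped.Load())
    }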
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
index 07a32ee..9bb32f0 100644
--- a/vendor/go.uber.org/zap/zapcore/tee.go
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -24,6 +24,11 @@
type multiCore []Core
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@@ -48,6 +53,16 @@
return clone
}
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {
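
A minimal sketch showing how the new multiCore.Level surfaces through LevelOf: a tee reports the most permissive level among its children. The encoder and sinks are arbitrary:

    package main

    import (
        "fmt"
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

        debugCore := zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zapcore.DebugLevel)
        errorCore := zapcore.NewCore(enc, zapcore.AddSync(os.Stderr), zapcore.ErrorLevel)

        tee := zapcore.NewTee(debugCore, errorCore)

        // The tee enables everything its most permissive child enables.
        fmt.Println(zapcore.LevelOf(tee)) // debug
    }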
diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
new file mode 100644
index 0000000..682de25
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapgrpc provides a logger that is compatible with grpclog.
+package zapgrpc // import "go.uber.org/zap/zapgrpc"
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86
+const (
+ grpcLvlInfo int = iota
+ grpcLvlWarn
+ grpcLvlError
+ grpcLvlFatal
+)
+
+// _grpcToZapLevel maps gRPC log levels to zap log levels.
+// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
+var _grpcToZapLevel = map[int]zapcore.Level{
+ grpcLvlInfo: zapcore.InfoLevel,
+ grpcLvlWarn: zapcore.WarnLevel,
+ grpcLvlError: zapcore.ErrorLevel,
+ grpcLvlFatal: zapcore.FatalLevel,
+}
+
+// An Option overrides a Logger's default configuration.
+type Option interface {
+ apply(*Logger)
+}
+
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WithDebug configures a Logger to print at zap's DebugLevel instead of
+// InfoLevel.
+// It only affects the Printf, Println and Print methods, which are only used in the gRPC v1 grpclog.Logger API.
+//
+// Deprecated: use grpclog.SetLoggerV2() for v2 API.
+func WithDebug() Option {
+ return optionFunc(func(logger *Logger) {
+ logger.print = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.DebugLevel,
+ print: logger.delegate.Debug,
+ printf: logger.delegate.Debugf,
+ }
+ })
+}
+
+// withWarn redirects the fatal level to the warn level, which makes testing
+// easier. This is intentionally unexported.
+func withWarn() Option {
+ return optionFunc(func(logger *Logger) {
+ logger.fatal = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.WarnLevel,
+ print: logger.delegate.Warn,
+ printf: logger.delegate.Warnf,
+ }
+ })
+}
+
+// NewLogger returns a new Logger.
+func NewLogger(l *zap.Logger, options ...Option) *Logger {
+ logger := &Logger{
+ delegate: l.Sugar(),
+ levelEnabler: l.Core(),
+ }
+ logger.print = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.InfoLevel,
+ print: logger.delegate.Info,
+ printf: logger.delegate.Infof,
+ }
+ logger.fatal = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.FatalLevel,
+ print: logger.delegate.Fatal,
+ printf: logger.delegate.Fatalf,
+ }
+ for _, option := range options {
+ option.apply(logger)
+ }
+ return logger
+}
+
+// printer implements Print, Printf, and Println operations for a Zap level.
+//
+// We use it to customize Debug vs Info, and Warn vs Fatal for Print and Fatal
+// respectively.
+type printer struct {
+ enab zapcore.LevelEnabler
+ level zapcore.Level
+ print func(...interface{})
+ printf func(string, ...interface{})
+}
+
+func (v *printer) Print(args ...interface{}) {
+ v.print(args...)
+}
+
+func (v *printer) Printf(format string, args ...interface{}) {
+ v.printf(format, args...)
+}
+
+func (v *printer) Println(args ...interface{}) {
+ if v.enab.Enabled(v.level) {
+ v.print(sprintln(args))
+ }
+}
+
+// Logger adapts zap's Logger to be compatible with grpclog.LoggerV2 and the deprecated grpclog.Logger.
+type Logger struct {
+ delegate *zap.SugaredLogger
+ levelEnabler zapcore.LevelEnabler
+ print *printer
+ fatal *printer
+ // printToDebug bool
+ // fatalToWarn bool
+}
+
+// Print implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Print(args ...interface{}) {
+ l.print.Print(args...)
+}
+
+// Printf implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Infof].
+func (l *Logger) Printf(format string, args ...interface{}) {
+ l.print.Printf(format, args...)
+}
+
+// Println implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Println(args ...interface{}) {
+ l.print.Println(args...)
+}
+
+// Info implements grpclog.LoggerV2.
+func (l *Logger) Info(args ...interface{}) {
+ l.delegate.Info(args...)
+}
+
+// Infoln implements grpclog.LoggerV2.
+func (l *Logger) Infoln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.InfoLevel) {
+ l.delegate.Info(sprintln(args))
+ }
+}
+
+// Infof implements grpclog.LoggerV2.
+func (l *Logger) Infof(format string, args ...interface{}) {
+ l.delegate.Infof(format, args...)
+}
+
+// Warning implements grpclog.LoggerV2.
+func (l *Logger) Warning(args ...interface{}) {
+ l.delegate.Warn(args...)
+}
+
+// Warningln implements grpclog.LoggerV2.
+func (l *Logger) Warningln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.WarnLevel) {
+ l.delegate.Warn(sprintln(args))
+ }
+}
+
+// Warningf implements grpclog.LoggerV2.
+func (l *Logger) Warningf(format string, args ...interface{}) {
+ l.delegate.Warnf(format, args...)
+}
+
+// Error implements grpclog.LoggerV2.
+func (l *Logger) Error(args ...interface{}) {
+ l.delegate.Error(args...)
+}
+
+// Errorln implements grpclog.LoggerV2.
+func (l *Logger) Errorln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.ErrorLevel) {
+ l.delegate.Error(sprintln(args))
+ }
+}
+
+// Errorf implements grpclog.LoggerV2.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ l.delegate.Errorf(format, args...)
+}
+
+// Fatal implements grpclog.LoggerV2.
+func (l *Logger) Fatal(args ...interface{}) {
+ l.fatal.Print(args...)
+}
+
+// Fatalln implements grpclog.LoggerV2.
+func (l *Logger) Fatalln(args ...interface{}) {
+ l.fatal.Println(args...)
+}
+
+// Fatalf implements grpclog.LoggerV2.
+func (l *Logger) Fatalf(format string, args ...interface{}) {
+ l.fatal.Printf(format, args...)
+}
+
+// V implements grpclog.LoggerV2.
+func (l *Logger) V(level int) bool {
+ return l.levelEnabler.Enabled(_grpcToZapLevel[level])
+}
+
+func sprintln(args []interface{}) string {
+ s := fmt.Sprintln(args...)
+ // Drop the new line character added by Sprintln
+ return s[:len(s)-1]
+}
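
To round this off, a usage sketch (not part of the patch) that routes gRPC's internal logging through the new zapgrpc package; the grpclog import assumes google.golang.org/grpc is available in the build:

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapgrpc"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        logger := zap.NewExample()
        defer func() { _ = logger.Sync() }()

        // Logger implements grpclog.LoggerV2, so it can replace gRPC's
        // default logger wholesale.
        grpclog.SetLoggerV2(zapgrpc.NewLogger(logger))

        grpclog.Info("gRPC internals now log via zap")
    }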