mirror of https://github.com/Luzifer/vault-openvpn.git synced 2024-09-21 02:22:56 +00:00

Deps: Update dependencies

Signed-off-by: Knut Ahlers <knut@ahlers.me>
Knut Ahlers committed 2018-10-08 13:25:24 +02:00
parent 6889a770a3, commit fd9a68bdd8
Signed by: luzifer (GPG Key ID: DC2729FDD34BE99E)
267 changed files with 29601 additions and 4028 deletions

Gopkg.lock (generated), 155 lines changed

@@ -18,35 +18,44 @@
version = "v1.4.7"
[[projects]]
- digest = "1:9413ddbde906f91f062fda0dfa9a7cff43458cd1b2282c0fa25c61d89300b116"
+ branch = "master"
+ digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "NUT"
- revision = "553a641470496b2327abcac10b36396bd98e45c9"
+ revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
- branch = "master"
digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19"
name = "github.com/hashicorp/errwrap"
packages = ["."]
pruneopts = "NUT"
- revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55"
+ revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
+ version = "v1.0.0"
[[projects]]
- branch = "master"
digest = "1:a5d940c38bf56f121721bfa747c66356df387cb9d5318c570c6d4170aab62862"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
pruneopts = "NUT"
- revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
+ revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18"
+ version = "v0.5.0"
[[projects]]
- branch = "master"
- digest = "1:4d55897d00e9b53c1c716e8fe504de3d21bec8edba0a330bfaa87902695e46e2"
+ digest = "1:2ed138049ab373f696db2081ca48f15c5abdf20893803612a284f2bdce2bf443"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
pruneopts = "NUT"
- revision = "b7773ae218740a7be65057fc60b366a49b538a44"
+ revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
+ version = "v1.0.0"
+ [[projects]]
+ branch = "master"
+ digest = "1:f299bf12387ef9e1e36571851c4bb2c5024b5e66d16cfa77b220ad488b47d196"
+ name = "github.com/hashicorp/go-retryablehttp"
+ packages = ["."]
+ pruneopts = "NUT"
+ revision = "e651d75abec6fbd4f2c09508f72ae7af8a8b7171"
[[projects]]
branch = "master"
@@ -57,14 +66,14 @@
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
[[projects]]
- digest = "1:0c539f680465786826d5022b7528ee42086d13d298782e5479506cd7c12e2daa"
+ branch = "master"
+ digest = "1:ab128c55634eb166f6ab170896ac0f53979992250811071938d6bf2af7034690"
name = "github.com/hashicorp/go-sockaddr"
packages = ["."]
pruneopts = "NUT"
- revision = "7165ee14aff120ee3642aa2bcf2dea8eebef29c3"
+ revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9"
[[projects]]
- branch = "master"
digest = "1:11c6c696067d3127ecf332b10f89394d386d9083f82baf71f40f2da31841a009"
name = "github.com/hashicorp/hcl"
packages = [
@@ -80,23 +89,25 @@
"json/token",
]
pruneopts = "NUT"
- revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
+ revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
+ version = "v1.0.0"
[[projects]]
- digest = "1:703439a234b0f6366136be3f6aac6abb7886a3b4f81278f5be96c4bfd9a43087"
+ digest = "1:7d92cfe9eb44a0babac6c1edfbb19d1b15966d1ca75aa818dc337b4da5deb4d6"
name = "github.com/hashicorp/vault"
packages = [
"api",
"helper/certutil",
"helper/compressutil",
"helper/errutil",
+ "helper/hclutil",
"helper/jsonutil",
"helper/parseutil",
"helper/strutil",
]
pruneopts = "NUT"
- revision = "756fdc4587350daf1c65b93647b2cc31a6f119cd"
+ revision = "e21712a687889de1125e0a12a980420b1a4f72d3"
- version = "v0.10.1"
+ version = "v0.10.4"
[[projects]]
digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
@@ -106,6 +117,14 @@
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
+ [[projects]]
+ digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
+ name = "github.com/konsorten/go-windows-terminal-sequences"
+ packages = ["."]
+ pruneopts = "NUT"
+ revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
+ version = "v1.0.1"
[[projects]]
digest = "1:d244f8666a838fe6ad70ec8fe77f50ebc29fdc3331a2729ba5886bef8435d10d"
name = "github.com/magiconair/properties"
@@ -115,27 +134,28 @@
version = "v1.8.0"
[[projects]]
- digest = "1:cb591533458f6eb6e2c1065ff3eac6b50263d7847deb23fc9f79b25bc608970e"
+ digest = "1:bff482b22ebed387378546ba6a7850fdef87fd47f8ee58a7c62124a8e889a56b"
name = "github.com/mattn/go-runewidth"
packages = ["."]
pruneopts = "NUT"
- revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
+ revision = "ce7b0b5c7b45a81508558cd1dba6bb1e4ddb51bb"
- version = "v0.0.2"
+ version = "v0.0.3"
[[projects]]
branch = "master"
- digest = "1:9db29b604bd78452d167abed82386ddd2f93973df3841896fb6ab8aff936f1d6"
+ digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "NUT"
- revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
+ revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
[[projects]]
- digest = "1:4b9dacaf3496e8bd82173b88c38004028a103b03ddd2de15a0a108f04619e00b"
+ digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "NUT"
- revision = "a4e142e9c047c904fa2f1e144d9a84e6133024bc"
+ revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
+ version = "v1.1.2"
[[projects]]
branch = "master"
@@ -143,15 +163,15 @@
name = "github.com/olekukonko/tablewriter"
packages = ["."]
pruneopts = "NUT"
- revision = "d4647c9c7a84d847478d890b816b7d8b62b0b279"
+ revision = "be2c049b30ccd4d3fd795d6bf7dce74e42eeedaa"
[[projects]]
- digest = "1:13b8f1a2ce177961dc9231606a52f709fab896c565f3988f60a7f6b4e543a902"
+ digest = "1:51ea800cff51752ff68e12e04106f5887b4daec6f9356721238c28019f0b42db"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = "NUT"
- revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
+ revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
- version = "v1.1.0"
+ version = "v1.2.0"
[[projects]]
digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
@@ -162,38 +182,31 @@
version = "v0.8.0"
[[projects]]
- branch = "master"
- digest = "1:09d61699d553a4e6ec998ad29816177b1f3d3ed0c18fe923d2c174ec065c99c8"
+ digest = "1:cb24eec7a9478395847671abfbea162885f0be9c7ff6ef20b699dc20804ae1a4"
name = "github.com/ryanuber/go-glob"
packages = ["."]
pruneopts = "NUT"
- revision = "256dc444b735e061061cf46c809487313d5b0065"
+ revision = "572520ed46dbddaed19ea3d9541bdd0494163693"
+ version = "v0.1"
[[projects]]
- digest = "1:45010b961ea5349797980f57e2b7cd54b398d71ec3aba056a6bf2916d7cce024"
- name = "github.com/sethgrid/pester"
- packages = ["."]
- pruneopts = "NUT"
- revision = "ed9870dad3170c0b25ab9b11830cc57c3a7798fb"
- [[projects]]
- digest = "1:6989062eb7ccf25cf38bf4fe3dba097ee209f896cda42cefdca3927047bef7b6"
+ digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = "NUT"
- revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
+ revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
- version = "v1.0.5"
+ version = "v1.1.0"
[[projects]]
- digest = "1:35f36ea322654e3d0932e58ad26556998260b9fa9e691471f756d173684eac0a"
+ digest = "1:330e9062b308ac597e28485699c02223bd052437a6eed32a173c9227dcb9d95a"
name = "github.com/spf13/afero"
packages = [
".",
"mem",
]
pruneopts = "NUT"
- revision = "63644898a8da0bc22138abf860edaf5277b6102e"
+ revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
- version = "v1.1.0"
+ version = "v1.1.2"
[[projects]]
digest = "1:3fa7947ca83b98ae553590d993886e845a4bff19b7b007e869c6e0dd3b9da9cd"
@@ -212,28 +225,28 @@
version = "v0.0.3"
[[projects]]
- branch = "master"
digest = "1:f29f83301ed096daed24a90f4af591b7560cb14b9cc3e1827abbf04db7269ab5"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
pruneopts = "NUT"
- revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
+ revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
+ version = "v1.0.0"
[[projects]]
- digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7"
+ digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
- revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
+ revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
- version = "v1.0.1"
+ version = "v1.0.3"
[[projects]]
- digest = "1:ea67fb4941c0a1a92f828e73cf426533c71db02df45a2cdf55a14c3e7b74c07a"
+ digest = "1:9caed8c7c05e8927f5bc6eec5698119b2b63a7cf304effc6c9b818bc864a10d3"
name = "github.com/spf13/viper"
packages = ["."]
pruneopts = "NUT"
- revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736"
+ revision = "2c12c60302a5a0e62ee102ca9bc996277c2f64f5"
- version = "v1.0.2"
+ version = "v1.2.1"
[[projects]]
branch = "master"
@@ -241,30 +254,32 @@
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
- revision = "ab813273cd59e1333f7ae7bff5d027d4aadf528c"
+ revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900"
- [[projects]]
- digest = "1:aff5fb0c8a4eaa5967146b2efa1a1e59b6195cf40574ab4983a5f222898f0660"
- name = "golang.org/x/net"
- packages = [
- "http2",
- "http2/hpack",
- "idna",
- "lex/httplex",
- ]
- pruneopts = "NUT"
- revision = "f5dfe339be1d06f81b22525fe34671ee7d2c8904"
[[projects]]
branch = "master"
- digest = "1:0dbe2a3199cde81dd841ea2de1386165f953d227ddb2b5c2c4b591209bf37746"
+ digest = "1:d59fa2f6b43207b64304b29530b4c9c7d2466c1406429ab620ab68e689a868ac"
+ name = "golang.org/x/net"
+ packages = [
+ "context",
+ "http/httpguts",
+ "http2",
+ "http2/hpack",
+ "idna",
+ ]
+ pruneopts = "NUT"
+ revision = "146acd28ed5894421fb5aac80ca93bc1b1f46f87"
+ [[projects]]
+ branch = "master"
+ digest = "1:68ca1f18d986adc1d25a28aa732806ee8f1c874cc5b606e4ba67ddd3eeb89e20"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
- revision = "c11f84a56e43e20a78cee75a7c034031ecf57d1f"
+ revision = "4497e2df6f9e69048a54498c7affbbec3294ad47"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -289,6 +304,14 @@
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
+ [[projects]]
+ branch = "master"
+ digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
+ name = "golang.org/x/time"
+ packages = ["rate"]
+ pruneopts = "NUT"
+ revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
name = "gopkg.in/yaml.v2"


@@ -2,10 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
- // Package snappy implements the snappy block-based compression format.
- // It aims for very high speeds and reasonable compression.
+ // Package snappy implements the Snappy compression format. It aims for very
+ // high speeds and reasonable compression.
//
- // The C++ snappy implementation is at https://github.com/google/snappy
+ // There are actually two Snappy formats: block and stream. They are related,
+ // but different: trying to decompress block-compressed data as a Snappy stream
+ // will fail, and vice versa. The block format is the Decode and Encode
+ // functions and the stream format is the Reader and Writer types.
+ //
+ // The block format, the more common case, is used when the complete size (the
+ // number of bytes) of the original data is known upfront, at the time
+ // compression starts. The stream format, also known as the framing format, is
+ // for when that isn't always true.
+ //
+ // The canonical, C++ implementation is at https://github.com/google/snappy and
+ // it only implements the block format.
package snappy // import "github.com/golang/snappy"
import (
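A minimal sketch of the block format described in the new package comment, using the exported Encode and Decode functions (the sample buffer is illustrative):

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	original := []byte("hello, snappy block format")

	// Encode compresses a complete in-memory buffer (block format).
	compressed := snappy.Encode(nil, original)

	// Decode reverses it; it fails if handed stream-format data instead.
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
		len(original), len(compressed), string(decompressed) == string(original))
}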


@@ -13,7 +13,7 @@ type ErrorFormatFunc func([]error) string
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
if len(es) == 1 {
- return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+ return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
}
points := make([]string, len(es))
@@ -22,6 +22,6 @@ func ListFormatFunc(es []error) string {
}
return fmt.Sprintf(
- "%d errors occurred:\n\n%s",
- len(es), strings.Join(points, "\n"))
+ "%d errors occurred:\n\t%s\n\n",
+ len(es), strings.Join(points, "\n\t"))
}
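A rough illustration of the new layout, assuming the default formatter that go-multierror installs: appending two errors and printing the result yields a tab-indented bullet list followed by a trailing blank line.

package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error
	result = multierror.Append(result, errors.New("first failure"))
	result = multierror.Append(result, errors.New("second failure"))

	// With the updated ListFormatFunc this prints roughly:
	//   2 errors occurred:
	//       * first failure
	//       * second failure
	fmt.Println(result.Error())
}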

vendor/github.com/hashicorp/go-multierror/sort.go (generated, vendored, new file), 16 lines

@@ -0,0 +1,16 @@
package multierror
// Len implements sort.Interface function for length
func (err Error) Len() int {
return len(err.Errors)
}
// Swap implements sort.Interface function for swapping elements
func (err Error) Swap(i, j int) {
err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
}
// Less implements sort.Interface function for determining order
func (err Error) Less(i, j int) bool {
return err.Errors[i].Error() < err.Errors[j].Error()
}
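Because Error now satisfies sort.Interface, callers can order the collected errors before formatting them; a small sketch (the error messages are illustrative):

package main

import (
	"errors"
	"fmt"
	"sort"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error
	result = multierror.Append(result, errors.New("zeta failed"))
	result = multierror.Append(result, errors.New("alpha failed"))

	// Len/Swap/Less are defined on the Error value, so sort the dereferenced struct;
	// the underlying Errors slice is shared, so the swaps take effect.
	sort.Sort(*result)

	fmt.Println(result.Error()) // "alpha failed" is now listed first
}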

vendor/github.com/hashicorp/go-retryablehttp/LICENSE (generated, vendored, new file), 363 lines

@@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

vendor/github.com/hashicorp/go-retryablehttp/client.go (generated, vendored, new file), 500 lines

@@ -0,0 +1,500 @@
// The retryablehttp package provides a familiar HTTP client interface with
// automatic retries and exponential backoff. It is a thin wrapper over the
// standard net/http client library and exposes nearly the same public API.
// This makes retryablehttp very easy to drop into existing programs.
//
// retryablehttp performs automatic retries under certain conditions. Mainly, if
// an error is returned by the client (connection errors etc), or if a 500-range
// response is received, then a retry is invoked. Otherwise, the response is
// returned and left to the caller to interpret.
//
// Requests which take a request body should provide a non-nil function
// parameter. The best choice is to provide either a function satisfying
// ReaderFunc which provides multiple io.Readers in an efficient manner, a
// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
// slice. As it is a reference type, and we will wrap it as needed by readers,
// we can efficiently re-use the request body without needing to copy it. If an
// io.Reader (such as a *bytes.Reader) is provided, the full body will be read
// prior to the first request, and will be efficiently re-used for any retries.
// ReadSeeker can be used, but some users have observed occasional data races
// between the net/http library and the Seek functionality of some
// implementations of ReadSeeker, so should be avoided if possible.
package retryablehttp
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
)
var (
// Default retry configuration
defaultRetryWaitMin = 1 * time.Second
defaultRetryWaitMax = 30 * time.Second
defaultRetryMax = 4
// defaultClient is used for performing requests without explicitly making
// a new client. It is purposely private to avoid modifications.
defaultClient = NewClient()
// We need to consume response bodies to maintain http connections, but
// limit the size we consume to respReadLimit.
respReadLimit = int64(4096)
)
// ReaderFunc is the type of function that can be given natively to NewRequest
type ReaderFunc func() (io.Reader, error)
// LenReader is an interface implemented by many in-memory io.Reader's. Used
// for automatically sending the right Content-Length header when possible.
type LenReader interface {
Len() int
}
// Request wraps the metadata needed to create HTTP requests.
type Request struct {
// body is a seekable reader over the request body payload. This is
// used to rewind the request data in between retries.
body ReaderFunc
// Embed an HTTP request directly. This makes a *Request act exactly
// like an *http.Request so that all meta methods are supported.
*http.Request
}
// WithContext returns wrapped Request with a shallow copy of underlying *http.Request
// with its context changed to ctx. The provided ctx must be non-nil.
func (r *Request) WithContext(ctx context.Context) *Request {
r.Request = r.Request.WithContext(ctx)
return r
}
// NewRequest creates a new wrapped request.
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
var err error
var body ReaderFunc
var contentLength int64
if rawBody != nil {
switch rawBody.(type) {
// If they gave us a function already, great! Use it.
case ReaderFunc:
body = rawBody.(ReaderFunc)
tmp, err := body()
if err != nil {
return nil, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
case func() (io.Reader, error):
body = rawBody.(func() (io.Reader, error))
tmp, err := body()
if err != nil {
return nil, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
// If a regular byte slice, we can read it over and over via new
// readers
case []byte:
buf := rawBody.([]byte)
body = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// If a bytes.Buffer we can read the underlying byte slice over and
// over
case *bytes.Buffer:
buf := rawBody.(*bytes.Buffer)
body = func() (io.Reader, error) {
return bytes.NewReader(buf.Bytes()), nil
}
contentLength = int64(buf.Len())
// We prioritize *bytes.Reader here because we don't really want to
// deal with it seeking so want it to match here instead of the
// io.ReadSeeker case.
case *bytes.Reader:
buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader))
if err != nil {
return nil, err
}
body = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// Compat case
case io.ReadSeeker:
raw := rawBody.(io.ReadSeeker)
body = func() (io.Reader, error) {
raw.Seek(0, 0)
return ioutil.NopCloser(raw), nil
}
if lr, ok := raw.(LenReader); ok {
contentLength = int64(lr.Len())
}
// Read all in so we can reset
case io.Reader:
buf, err := ioutil.ReadAll(rawBody.(io.Reader))
if err != nil {
return nil, err
}
body = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
default:
return nil, fmt.Errorf("cannot handle type %T", rawBody)
}
}
httpReq, err := http.NewRequest(method, url, nil)
if err != nil {
return nil, err
}
httpReq.ContentLength = contentLength
return &Request{body, httpReq}, nil
}
// RequestLogHook allows a function to run before each retry. The HTTP
// request which will be made, and the retry number (0 for the initial
// request) are available to users. The internal logger is exposed to
// consumers.
type RequestLogHook func(*log.Logger, *http.Request, int)
// ResponseLogHook is like RequestLogHook, but allows running a function
// on each HTTP response. This function will be invoked at the end of
// every HTTP request executed, regardless of whether a subsequent retry
// needs to be performed or not. If the response body is read or closed
// from this method, this will affect the response returned from Do().
type ResponseLogHook func(*log.Logger, *http.Response)
// CheckRetry specifies a policy for handling retries. It is called
// following each request with the response and error values returned by
// the http.Client. If CheckRetry returns false, the Client stops retrying
// and returns the response to the caller. If CheckRetry returns an error,
// that error value is returned in lieu of the error from the request. The
// Client will close any response body when retrying, but if the retry is
// aborted it is up to the CheckResponse callback to properly close any
// response body before returning.
type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
// Backoff specifies a policy for how long to wait between retries.
// It is called after a failing request to determine the amount of time
// that should pass before trying again.
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
// ErrorHandler is called if retries are expired, containing the last status
// from the http library. If not specified, default behavior for the library is
// to close the body and return an error indicating how many tries were
// attempted. If overriding this, be sure to close the body if needed.
type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
// Client is used to make HTTP requests. It adds additional functionality
// like automatic retries to tolerate minor outages.
type Client struct {
HTTPClient *http.Client // Internal HTTP client.
Logger *log.Logger // Custom logger instance.
RetryWaitMin time.Duration // Minimum time to wait
RetryWaitMax time.Duration // Maximum time to wait
RetryMax int // Maximum number of retries
// RequestLogHook allows a user-supplied function to be called
// before each retry.
RequestLogHook RequestLogHook
// ResponseLogHook allows a user-supplied function to be called
// with the response from each HTTP request executed.
ResponseLogHook ResponseLogHook
// CheckRetry specifies the policy for handling retries, and is called
// after each request. The default policy is DefaultRetryPolicy.
CheckRetry CheckRetry
// Backoff specifies the policy for how long to wait between retries
Backoff Backoff
// ErrorHandler specifies the custom error handler to use, if any
ErrorHandler ErrorHandler
}
// NewClient creates a new Client with default settings.
func NewClient() *Client {
return &Client{
HTTPClient: cleanhttp.DefaultClient(),
Logger: log.New(os.Stderr, "", log.LstdFlags),
RetryWaitMin: defaultRetryWaitMin,
RetryWaitMax: defaultRetryWaitMax,
RetryMax: defaultRetryMax,
CheckRetry: DefaultRetryPolicy,
Backoff: DefaultBackoff,
}
}
// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
// will retry on connection errors and server errors.
func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
// do not retry on context.Canceled or context.DeadlineExceeded
if ctx.Err() != nil {
return false, ctx.Err()
}
if err != nil {
return true, err
}
// Check the response code. We retry on 500-range responses to allow
// the server time to recover, as 500's are typically not permanent
// errors and may relate to outages on the server side. This will catch
// invalid response codes as well, like 0 and 999.
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
return true, nil
}
return false, nil
}
// DefaultBackoff provides a default callback for Client.Backoff which
// will perform exponential backoff based on the attempt number and limited
// by the provided minimum and maximum durations.
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
mult := math.Pow(2, float64(attemptNum)) * float64(min)
sleep := time.Duration(mult)
if float64(sleep) != mult || sleep > max {
sleep = max
}
return sleep
}
// LinearJitterBackoff provides a callback for Client.Backoff which will
// perform linear backoff based on the attempt number and with jitter to
// prevent a thundering herd.
//
// min and max here are *not* absolute values. The number to be multiplied by
// the attempt number will be chosen at random from between them, thus they are
// bounding the jitter.
//
// For instance:
// * To get strictly linear backoff of one second increasing each retry, set
// both to one second (1s, 2s, 3s, 4s, ...)
// * To get a small amount of jitter centered around one second increasing each
// retry, set to around one second, such as a min of 800ms and max of 1200ms
// (892ms, 2102ms, 2945ms, 4312ms, ...)
// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
// attemptNum always starts at zero but we want to start at 1 for multiplication
attemptNum++
if max <= min {
// Unclear what to do here, or they are the same, so return min *
// attemptNum
return min * time.Duration(attemptNum)
}
// Seed rand; doing this every time is fine
rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
// Pick a random number that lies somewhere between the min and max and
// multiply by the attemptNum. attemptNum starts at zero so we always
// increment here. We first get a random percentage, then apply that to the
// difference between min and max, and add to min.
jitter := rand.Float64() * float64(max-min)
jitterMin := int64(jitter) + int64(min)
return time.Duration(jitterMin * int64(attemptNum))
}
// PassthroughErrorHandler is an ErrorHandler that directly passes through the
// values from the net/http library for the final request. The body is not
// closed.
func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) {
return resp, err
}
// Do wraps calling an HTTP method with retries.
func (c *Client) Do(req *Request) (*http.Response, error) {
if c.Logger != nil {
c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
}
var resp *http.Response
var err error
for i := 0; ; i++ {
var code int // HTTP response code
// Always rewind the request body when non-nil.
if req.body != nil {
body, err := req.body()
if err != nil {
return resp, err
}
if c, ok := body.(io.ReadCloser); ok {
req.Request.Body = c
} else {
req.Request.Body = ioutil.NopCloser(body)
}
}
if c.RequestLogHook != nil {
c.RequestLogHook(c.Logger, req.Request, i)
}
// Attempt the request
resp, err = c.HTTPClient.Do(req.Request)
if resp != nil {
code = resp.StatusCode
}
// Check if we should continue with retries.
checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err)
if err != nil {
if c.Logger != nil {
c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
}
} else {
// Call this here to maintain the behavior of logging all requests,
// even if CheckRetry signals to stop.
if c.ResponseLogHook != nil {
// Call the response logger function if provided.
c.ResponseLogHook(c.Logger, resp)
}
}
// Now decide if we should continue.
if !checkOK {
if checkErr != nil {
err = checkErr
}
return resp, err
}
// We do this before drainBody because there's no need for the I/O if
// we're breaking out
remain := c.RetryMax - i
if remain <= 0 {
break
}
// We're going to retry, consume any response to reuse the connection.
if err == nil && resp != nil {
c.drainBody(resp.Body)
}
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
if code > 0 {
desc = fmt.Sprintf("%s (status: %d)", desc, code)
}
if c.Logger != nil {
c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
}
time.Sleep(wait)
}
if c.ErrorHandler != nil {
return c.ErrorHandler(resp, err, c.RetryMax+1)
}
// By default, we close the response body and return an error without
// returning the response
if resp != nil {
resp.Body.Close()
}
return nil, fmt.Errorf("%s %s giving up after %d attempts",
req.Method, req.URL, c.RetryMax+1)
}
// Try to read the response body so we can reuse this connection.
func (c *Client) drainBody(body io.ReadCloser) {
defer body.Close()
_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
if err != nil {
if c.Logger != nil {
c.Logger.Printf("[ERR] error reading response body: %v", err)
}
}
}
// Get is a shortcut for doing a GET request without making a new client.
func Get(url string) (*http.Response, error) {
return defaultClient.Get(url)
}
// Get is a convenience helper for doing simple GET requests.
func (c *Client) Get(url string) (*http.Response, error) {
req, err := NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Head is a shortcut for doing a HEAD request without making a new client.
func Head(url string) (*http.Response, error) {
return defaultClient.Head(url)
}
// Head is a convenience method for doing simple HEAD requests.
func (c *Client) Head(url string) (*http.Response, error) {
req, err := NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Post is a shortcut for doing a POST request without making a new client.
func Post(url, bodyType string, body interface{}) (*http.Response, error) {
return defaultClient.Post(url, bodyType, body)
}
// Post is a convenience method for doing simple POST requests.
func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) {
req, err := NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return c.Do(req)
}
// PostForm is a shortcut to perform a POST with form data without creating
// a new client.
func PostForm(url string, data url.Values) (*http.Response, error) {
return defaultClient.PostForm(url, data)
}
// PostForm is a convenience method for doing simple POST operations using
// pre-filled url.Values form data.
func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) {
return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
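A short usage sketch of the client added above; the URL and retry settings are illustrative, and the defaults set in NewClient apply wherever nothing is overridden:

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()
	client.RetryMax = 3                           // up to 3 retries after the initial attempt
	client.RetryWaitMin = 500 * time.Millisecond  // lower bound for the backoff
	client.Backoff = retryablehttp.LinearJitterBackoff

	// GET requests retry automatically on connection errors and 5xx responses.
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body), "bytes")
}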


@@ -16,7 +16,7 @@ var (
// Centralize all regexps and regexp.Copy() where necessary.
signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`)
whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`)
- ifNameRE *regexp.Regexp = regexp.MustCompile(`^Ethernet adapter ([^:]+):`)
+ ifNameRE *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`)
ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`)
)


@@ -1,5 +1,7 @@
package api
+ import "context"
// TokenAuth is used to perform token backend operations on Vault
type TokenAuth struct {
c *Client
@@ -16,7 +18,9 @@ func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) {
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -31,7 +35,9 @@ func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) {
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -46,7 +52,9 @@ func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -63,7 +71,9 @@ func (c *TokenAuth) Lookup(token string) (*Secret, error) {
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -79,7 +89,10 @@ func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) {
}); err != nil {
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -91,7 +104,9 @@ func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) {
func (c *TokenAuth) LookupSelf() (*Secret, error) {
r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self")
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -109,7 +124,9 @@ func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) {
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -126,7 +143,9 @@ func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) {
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -146,7 +165,9 @@ func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, erro
return nil, err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
@@ -164,7 +185,10 @@ func (c *TokenAuth) RevokeAccessor(accessor string) error {
}); err != nil {
return err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return err
}
@@ -183,7 +207,9 @@ func (c *TokenAuth) RevokeOrphan(token string) error {
return err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return err
}
@@ -197,7 +223,10 @@ func (c *TokenAuth) RevokeOrphan(token string) error {
// an effect.
func (c *TokenAuth) RevokeSelf(token string) error {
r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self")
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return err
}
@@ -217,7 +246,9 @@ func (c *TokenAuth) RevokeTree(token string) error {
return err
}
- resp, err := c.c.RawRequest(r)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+ resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return err
}


@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"crypto/tls" "crypto/tls"
"fmt" "fmt"
"net" "net"
@ -16,10 +17,11 @@ import (
"github.com/hashicorp/errwrap" "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-cleanhttp"
retryablehttp "github.com/hashicorp/go-retryablehttp"
"github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/helper/parseutil"
"github.com/sethgrid/pester"
"golang.org/x/net/http2" "golang.org/x/net/http2"
"golang.org/x/time/rate"
) )
const EnvVaultAddress = "VAULT_ADDR" const EnvVaultAddress = "VAULT_ADDR"
@ -34,6 +36,7 @@ const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
const EnvVaultMaxRetries = "VAULT_MAX_RETRIES" const EnvVaultMaxRetries = "VAULT_MAX_RETRIES"
const EnvVaultToken = "VAULT_TOKEN" const EnvVaultToken = "VAULT_TOKEN"
const EnvVaultMFA = "VAULT_MFA" const EnvVaultMFA = "VAULT_MFA"
const EnvRateLimit = "VAULT_RATE_LIMIT"
// WrappingLookupFunc is a function that, given an HTTP verb and a path, // WrappingLookupFunc is a function that, given an HTTP verb and a path,
// returns an optional string duration to be used for response wrapping (e.g. // returns an optional string duration to be used for response wrapping (e.g.
@ -59,8 +62,9 @@ type Config struct {
// (or http.DefaultClient). // (or http.DefaultClient).
HttpClient *http.Client HttpClient *http.Client
// MaxRetries controls the maximum number of times to retry when a 5xx error // MaxRetries controls the maximum number of times to retry when a 5xx
// occurs. Set to 0 or less to disable retrying. Defaults to 0. // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total
// of three tries).
MaxRetries int MaxRetries int
// Timeout is for setting custom timeout parameter in the HttpClient // Timeout is for setting custom timeout parameter in the HttpClient
@ -69,6 +73,16 @@ type Config struct {
// If there is an error when creating the configuration, this will be the // If there is an error when creating the configuration, this will be the
// error // error
Error error Error error
// The Backoff function to use; a default is used if not provided
Backoff retryablehttp.Backoff
// Limiter is the rate limiter used by the client.
// If this pointer is nil, then there will be no limit set.
// In contrast, if this pointer is set, even to an empty struct,
// then that limiter will be used. Note that an empty Limiter
// is equivalent blocking all events.
Limiter *rate.Limiter
} }
// TLSConfig contains the parameters needed to configure TLS on the HTTP client // TLSConfig contains the parameters needed to configure TLS on the HTTP client
@ -131,12 +145,15 @@ func DefaultConfig() *Config {
// but in e.g. http_test actual redirect handling is necessary // but in e.g. http_test actual redirect handling is necessary
config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
// Returning this value causes the Go net library to not close the // Returning this value causes the Go net library to not close the
// response body and to nil out the error. Otherwise pester tries // response body and to nil out the error. Otherwise retry clients may
// three times on every redirect because it sees an error from this // try three times on every redirect because it sees an error from this
// function (to prevent redirects) passing through to it. // function (to prevent redirects) passing through to it.
return http.ErrUseLastResponse return http.ErrUseLastResponse
} }
config.Backoff = retryablehttp.LinearJitterBackoff
config.MaxRetries = 2
return config return config
} }
@ -205,6 +222,7 @@ func (c *Config) ReadEnvironment() error {
var envInsecure bool var envInsecure bool
var envTLSServerName string var envTLSServerName string
var envMaxRetries *uint64 var envMaxRetries *uint64
var limit *rate.Limiter
// Parse the environment variables // Parse the environment variables
if v := os.Getenv(EnvVaultAddress); v != "" { if v := os.Getenv(EnvVaultAddress); v != "" {
@ -229,6 +247,13 @@ func (c *Config) ReadEnvironment() error {
if v := os.Getenv(EnvVaultClientKey); v != "" { if v := os.Getenv(EnvVaultClientKey); v != "" {
envClientKey = v envClientKey = v
} }
if v := os.Getenv(EnvRateLimit); v != "" {
rateLimit, burstLimit, err := parseRateLimit(v)
if err != nil {
return err
}
limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit)
}
if t := os.Getenv(EnvVaultClientTimeout); t != "" { if t := os.Getenv(EnvVaultClientTimeout); t != "" {
clientTimeout, err := parseutil.ParseDurationSecond(t) clientTimeout, err := parseutil.ParseDurationSecond(t)
if err != nil { if err != nil {
@ -260,6 +285,8 @@ func (c *Config) ReadEnvironment() error {
c.modifyLock.Lock() c.modifyLock.Lock()
defer c.modifyLock.Unlock() defer c.modifyLock.Unlock()
c.Limiter = limit
if err := c.ConfigureTLS(t); err != nil { if err := c.ConfigureTLS(t); err != nil {
return err return err
} }
@ -269,7 +296,7 @@ func (c *Config) ReadEnvironment() error {
} }
if envMaxRetries != nil { if envMaxRetries != nil {
c.MaxRetries = int(*envMaxRetries) + 1 c.MaxRetries = int(*envMaxRetries)
} }
if envClientTimeout != 0 { if envClientTimeout != 0 {
@ -279,6 +306,21 @@ func (c *Config) ReadEnvironment() error {
return nil return nil
} }
func parseRateLimit(val string) (rate float64, burst int, err error) {
_, err = fmt.Sscanf(val, "%f:%d", &rate, &burst)
if err != nil {
rate, err = strconv.ParseFloat(val, 64)
if err != nil {
err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit)
}
burst = int(rate)
}
return rate, burst, err
}
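For illustration only (not part of the vendored diff): a minimal sketch of how the new VAULT_RATE_LIMIT variable is consumed via Config.ReadEnvironment, assuming the package is imported as github.com/hashicorp/vault/api; the value and the program itself are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	// "rate:burst" form; a single float such as "100.5" would set burst = int(rate).
	os.Setenv("VAULT_RATE_LIMIT", "100.5:200")

	cfg := api.DefaultConfig()
	if err := cfg.ReadEnvironment(); err != nil {
		fmt.Println("config error:", err)
		return
	}
	// cfg.Limiter is now a *rate.Limiter allowing ~100.5 requests/second with bursts of 200.
	fmt.Println("limiter configured:", cfg.Limiter != nil)
}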
// Client is the client to the Vault API. Create a client with NewClient. // Client is the client to the Vault API. Create a client with NewClient.
type Client struct { type Client struct {
modifyLock sync.RWMutex modifyLock sync.RWMutex
@ -346,11 +388,12 @@ func (c *Client) SetAddress(addr string) error {
c.modifyLock.Lock() c.modifyLock.Lock()
defer c.modifyLock.Unlock() defer c.modifyLock.Unlock()
var err error parsedAddr, err := url.Parse(addr)
if c.addr, err = url.Parse(addr); err != nil { if err != nil {
return errwrap.Wrapf("failed to set address: {{err}}", err) return errwrap.Wrapf("failed to set address: {{err}}", err)
} }
c.addr = parsedAddr
return nil return nil
} }
@ -362,6 +405,18 @@ func (c *Client) Address() string {
return c.addr.String() return c.addr.String()
} }
// SetLimiter will set the rate limiter for this client.
// This method is thread-safe.
// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter
func (c *Client) SetLimiter(rateLimit float64, burst int) {
c.modifyLock.RLock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
c.modifyLock.RUnlock()
c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst)
}
// SetMaxRetries sets the number of retries that will be used in the case of certain errors // SetMaxRetries sets the number of retries that will be used in the case of certain errors
func (c *Client) SetMaxRetries(retries int) { func (c *Client) SetMaxRetries(retries int) {
c.modifyLock.RLock() c.modifyLock.RLock()
@ -382,6 +437,15 @@ func (c *Client) SetClientTimeout(timeout time.Duration) {
c.config.Timeout = timeout c.config.Timeout = timeout
} }
// CurrentWrappingLookupFunc returns the current lookup function, which maps an
// operation and path to the desired wrap TTL
func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
return c.wrappingLookupFunc
}
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs // SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path // for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
@ -434,6 +498,16 @@ func (c *Client) SetHeaders(headers http.Header) {
c.headers = headers c.headers = headers
} }
// SetBackoff sets the backoff function to be used for future requests.
func (c *Client) SetBackoff(backoff retryablehttp.Backoff) {
c.modifyLock.RLock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
c.modifyLock.RUnlock()
c.config.Backoff = backoff
}
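For illustration only: a hedged sketch of the client-level knobs added or changed here (SetMaxRetries, SetBackoff, SetLimiter), assuming the default config points at a reachable Vault address; the values are arbitrary.

package main

import (
	"log"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetMaxRetries(4)                              // retry up to four times on 5xx responses
	client.SetBackoff(retryablehttp.LinearJitterBackoff) // same default the config now installs
	client.SetLimiter(10, 20)                            // at most 10 requests/second, bursts of 20
}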
// Clone creates a new client with the same configuration. Note that the same // Clone creates a new client with the same configuration. Note that the same
// underlying http.Client is used; modifying the client from more than one // underlying http.Client is used; modifying the client from more than one
// goroutine at once may not be safe, so modify the client as needed and then // goroutine at once may not be safe, so modify the client as needed and then
@ -449,6 +523,8 @@ func (c *Client) Clone() (*Client, error) {
HttpClient: config.HttpClient, HttpClient: config.HttpClient,
MaxRetries: config.MaxRetries, MaxRetries: config.MaxRetries,
Timeout: config.Timeout, Timeout: config.Timeout,
Backoff: config.Backoff,
Limiter: config.Limiter,
} }
config.modifyLock.RUnlock() config.modifyLock.RUnlock()
@ -470,14 +546,20 @@ func (c *Client) SetPolicyOverride(override bool) {
// doesn't need to be called externally. // doesn't need to be called externally.
func (c *Client) NewRequest(method, requestPath string) *Request { func (c *Client) NewRequest(method, requestPath string) *Request {
c.modifyLock.RLock() c.modifyLock.RLock()
defer c.modifyLock.RUnlock() addr := c.addr
token := c.token
mfaCreds := c.mfaCreds
wrappingLookupFunc := c.wrappingLookupFunc
headers := c.headers
policyOverride := c.policyOverride
c.modifyLock.RUnlock()
// if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV
// record and take the highest match; this is not designed for high-availability, just discovery // record and take the highest match; this is not designed for high-availability, just discovery
var host string = c.addr.Host var host string = addr.Host
if c.addr.Port() == "" { if addr.Port() == "" {
// Internet Draft specifies that the SRV record is ignored if a port is given // Internet Draft specifies that the SRV record is ignored if a port is given
_, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname()) _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname())
if err == nil && len(addrs) > 0 { if err == nil && len(addrs) > 0 {
host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port)
} }
@ -486,12 +568,12 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
req := &Request{ req := &Request{
Method: method, Method: method,
URL: &url.URL{ URL: &url.URL{
User: c.addr.User, User: addr.User,
Scheme: c.addr.Scheme, Scheme: addr.Scheme,
Host: host, Host: host,
Path: path.Join(c.addr.Path, requestPath), Path: path.Join(addr.Path, requestPath),
}, },
ClientToken: c.token, ClientToken: token,
Params: make(map[string][]string), Params: make(map[string][]string),
} }
@ -505,21 +587,19 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
lookupPath = requestPath lookupPath = requestPath
} }
req.MFAHeaderVals = c.mfaCreds req.MFAHeaderVals = mfaCreds
if c.wrappingLookupFunc != nil { if wrappingLookupFunc != nil {
req.WrapTTL = c.wrappingLookupFunc(method, lookupPath) req.WrapTTL = wrappingLookupFunc(method, lookupPath)
} else { } else {
req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
} }
if c.config.Timeout != 0 {
c.config.HttpClient.Timeout = c.config.Timeout if headers != nil {
} req.Headers = headers
if c.headers != nil {
req.Headers = c.headers
} }
req.PolicyOverride = c.policyOverride req.PolicyOverride = policyOverride
return req return req
} }
@ -528,12 +608,30 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
// a Vault server not configured with this client. This is an advanced operation // a Vault server not configured with this client. This is an advanced operation
// that generally won't need to be called externally. // that generally won't need to be called externally.
func (c *Client) RawRequest(r *Request) (*Response, error) { func (c *Client) RawRequest(r *Request) (*Response, error) {
return c.RawRequestWithContext(context.Background(), r)
}
// RawRequestWithContext performs the raw request given. This request may be against
// a Vault server not configured with this client. This is an advanced operation
// that generally won't need to be called externally.
func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) {
c.modifyLock.RLock() c.modifyLock.RLock()
c.config.modifyLock.RLock()
defer c.config.modifyLock.RUnlock()
token := c.token token := c.token
c.config.modifyLock.RLock()
limiter := c.config.Limiter
maxRetries := c.config.MaxRetries
backoff := c.config.Backoff
httpClient := c.config.HttpClient
timeout := c.config.Timeout
c.config.modifyLock.RUnlock()
c.modifyLock.RUnlock() c.modifyLock.RUnlock()
if limiter != nil {
limiter.Wait(ctx)
}
// Sanity check the token before potentially erroring from the API // Sanity check the token before potentially erroring from the API
idx := strings.IndexFunc(token, func(c rune) bool { idx := strings.IndexFunc(token, func(c rune) bool {
return !unicode.IsPrint(c) return !unicode.IsPrint(c)
@ -544,14 +642,32 @@ func (c *Client) RawRequest(r *Request) (*Response, error) {
redirectCount := 0 redirectCount := 0
START: START:
req, err := r.ToHTTP() req, err := r.toRetryableHTTP()
if err != nil { if err != nil {
return nil, err return nil, err
} }
if req == nil {
return nil, fmt.Errorf("nil request created")
}
client := pester.NewExtendedClient(c.config.HttpClient) if timeout != 0 {
client.Backoff = pester.LinearJitterBackoff ctx, _ = context.WithTimeout(ctx, timeout)
client.MaxRetries = c.config.MaxRetries }
req.Request = req.Request.WithContext(ctx)
if backoff == nil {
backoff = retryablehttp.LinearJitterBackoff
}
client := &retryablehttp.Client{
HTTPClient: httpClient,
RetryWaitMin: 1000 * time.Millisecond,
RetryWaitMax: 1500 * time.Millisecond,
RetryMax: maxRetries,
CheckRetry: retryablehttp.DefaultRetryPolicy,
Backoff: backoff,
ErrorHandler: retryablehttp.PassthroughErrorHandler,
}
var result *Response var result *Response
resp, err := client.Do(req) resp, err := client.Do(req)
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"fmt" "fmt"
) )
@ -8,7 +9,10 @@ import (
func (c *Client) Help(path string) (*Help, error) { func (c *Client) Help(path string) (*Help, error) {
r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path))
r.Params.Add("help", "1") r.Params.Add("help", "1")
resp, err := c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -2,9 +2,9 @@ package api
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"net/http"
"os" "os"
"github.com/hashicorp/errwrap" "github.com/hashicorp/errwrap"
@ -47,7 +47,10 @@ func (c *Client) Logical() *Logical {
func (c *Logical) Read(path string) (*Secret, error) { func (c *Logical) Read(path string) (*Secret, error) {
r := c.c.NewRequest("GET", "/v1/"+path) r := c.c.NewRequest("GET", "/v1/"+path)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if resp != nil { if resp != nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -78,7 +81,10 @@ func (c *Logical) List(path string) (*Secret, error) {
// handle the wrapping lookup function // handle the wrapping lookup function
r.Method = "GET" r.Method = "GET"
r.Params.Set("list", "true") r.Params.Set("list", "true")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if resp != nil { if resp != nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -109,7 +115,9 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if resp != nil { if resp != nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -130,16 +138,15 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro
return nil, err return nil, err
} }
if resp.StatusCode == 200 {
return ParseSecret(resp.Body) return ParseSecret(resp.Body)
}
return nil, nil
} }
func (c *Logical) Delete(path string) (*Secret, error) { func (c *Logical) Delete(path string) (*Secret, error) {
r := c.c.NewRequest("DELETE", "/v1/"+path) r := c.c.NewRequest("DELETE", "/v1/"+path)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if resp != nil { if resp != nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -160,11 +167,7 @@ func (c *Logical) Delete(path string) (*Secret, error) {
return nil, err return nil, err
} }
if resp.StatusCode == 200 {
return ParseSecret(resp.Body) return ParseSecret(resp.Body)
}
return nil, nil
} }
func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
@ -184,35 +187,44 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if resp != nil { if resp != nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
if resp == nil || resp.StatusCode != 404 {
// Return all errors except those that are from a 404 as we handle the not if err != nil {
// found error as a special case.
if err != nil && (resp == nil || resp.StatusCode != 404) {
return nil, err return nil, err
} }
if resp == nil { if resp == nil {
return nil, nil return nil, nil
} }
switch resp.StatusCode {
case http.StatusOK: // New method is supported
return ParseSecret(resp.Body) return ParseSecret(resp.Body)
case http.StatusNotFound: // Fall back to old method
default:
return nil, nil
} }
// In the 404 case this may actually be a wrapped 404 error
secret, parseErr := ParseSecret(resp.Body)
switch parseErr {
case nil:
case io.EOF:
return nil, nil
default:
return nil, err
}
if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
return secret, nil
}
// Otherwise this might be an old-style wrapping token so attempt the old
// method
if wrappingToken != "" { if wrappingToken != "" {
origToken := c.c.Token() origToken := c.c.Token()
defer c.c.SetToken(origToken) defer c.c.SetToken(origToken)
c.c.SetToken(wrappingToken) c.c.SetToken(wrappingToken)
} }
secret, err := c.Read(wrappedResponseLocation) secret, err = c.Read(wrappedResponseLocation)
if err != nil { if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err)
} }
View File
@ -64,9 +64,7 @@ type RenewerInput struct {
// Secret is the secret to renew // Secret is the secret to renew
Secret *Secret Secret *Secret
// Grace is a minimum renewal before returning so the upstream client // DEPRECATED: this does not do anything.
// can do a re-read. This can be used to prevent clients from waiting
// too long to read a new credential and incur downtime.
Grace time.Duration Grace time.Duration
// Rand is the randomizer to use for underlying randomization. If not // Rand is the randomizer to use for underlying randomization. If not
@ -107,8 +105,6 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
return nil, ErrRenewerMissingSecret return nil, ErrRenewerMissingSecret
} }
grace := i.Grace
random := i.Rand random := i.Rand
if random == nil { if random == nil {
random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
@ -122,7 +118,6 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
return &Renewer{ return &Renewer{
client: c, client: c,
secret: secret, secret: secret,
grace: grace,
increment: i.Increment, increment: i.Increment,
random: random, random: random,
doneCh: make(chan error, 1), doneCh: make(chan error, 1),
@ -166,10 +161,7 @@ func (r *Renewer) Renew() {
result = r.renewLease() result = r.renewLease()
} }
select { r.doneCh <- result
case r.doneCh <- result:
case <-r.stopCh:
}
} }
// renewAuth is a helper for renewing authentication. // renewAuth is a helper for renewing authentication.
View File
@ -4,8 +4,11 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
retryablehttp "github.com/hashicorp/go-retryablehttp"
) )
// Request is a raw request configuration structure used to initiate // Request is a raw request configuration structure used to initiate
@ -19,6 +22,12 @@ type Request struct {
MFAHeaderVals []string MFAHeaderVals []string
WrapTTL string WrapTTL string
Obj interface{} Obj interface{}
// When possible, use BodyBytes as it is more efficient due to how the
// retry logic works
BodyBytes []byte
// Fallback
Body io.Reader Body io.Reader
BodySize int64 BodySize int64
@ -30,34 +39,73 @@ type Request struct {
// SetJSONBody is used to set a request body that is a JSON-encoded value. // SetJSONBody is used to set a request body that is a JSON-encoded value.
func (r *Request) SetJSONBody(val interface{}) error { func (r *Request) SetJSONBody(val interface{}) error {
buf := bytes.NewBuffer(nil) buf, err := json.Marshal(val)
enc := json.NewEncoder(buf) if err != nil {
if err := enc.Encode(val); err != nil {
return err return err
} }
r.Obj = val r.Obj = val
r.Body = buf r.BodyBytes = buf
r.BodySize = int64(buf.Len())
return nil return nil
} }
// ResetJSONBody is used to reset the body for a redirect // ResetJSONBody is used to reset the body for a redirect
func (r *Request) ResetJSONBody() error { func (r *Request) ResetJSONBody() error {
if r.Body == nil { if r.BodyBytes == nil {
return nil return nil
} }
return r.SetJSONBody(r.Obj) return r.SetJSONBody(r.Obj)
} }
// ToHTTP turns this request into a valid *http.Request for use with the // DEPRECATED: ToHTTP turns this request into a valid *http.Request for use
// net/http package. // with the net/http package.
func (r *Request) ToHTTP() (*http.Request, error) { func (r *Request) ToHTTP() (*http.Request, error) {
req, err := r.toRetryableHTTP()
if err != nil {
return nil, err
}
switch {
case r.BodyBytes == nil && r.Body == nil:
// No body
case r.BodyBytes != nil:
req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes))
default:
if c, ok := r.Body.(io.ReadCloser); ok {
req.Request.Body = c
} else {
req.Request.Body = ioutil.NopCloser(r.Body)
}
}
return req.Request, nil
}
func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
// Encode the query parameters // Encode the query parameters
r.URL.RawQuery = r.Params.Encode() r.URL.RawQuery = r.Params.Encode()
// Create the HTTP request // Create the HTTP request, defaulting to retryable
req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body) var req *retryablehttp.Request
var err error
var body interface{}
switch {
case r.BodyBytes == nil && r.Body == nil:
// No body
case r.BodyBytes != nil:
// Use bytes, it's more efficient
body = r.BodyBytes
default:
body = r.Body
}
req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body)
if err != nil { if err != nil {
return nil, err return nil, err
} }
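For illustration only: a sketch of the new BodyBytes-based request path, assuming an already-initialized *api.Client; the package name, function name, and secret path are hypothetical.

package example

import "github.com/hashicorp/vault/api"

func writeExample(client *api.Client) error {
	req := client.NewRequest("PUT", "/v1/secret/foo") // hypothetical path
	if err := req.SetJSONBody(map[string]interface{}{"value": "bar"}); err != nil {
		return err
	}
	// SetJSONBody now stores the marshaled JSON in req.BodyBytes, which the
	// retrying transport can replay; ToHTTP remains available but is deprecated.
	resp, err := client.RawRequest(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}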
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"bytes"
"fmt" "fmt"
"io" "io"
"time" "time"
@ -101,7 +102,8 @@ func (s *Secret) TokenRemainingUses() (int, error) {
} }
// TokenPolicies returns the standardized list of policies for the given secret. // TokenPolicies returns the standardized list of policies for the given secret.
// If the secret is nil or does not contain any policies, this returns nil. // If the secret is nil or does not contain any policies, this returns nil. It
// also populates the secret's Auth info with identity/token policy info.
func (s *Secret) TokenPolicies() ([]string, error) { func (s *Secret) TokenPolicies() ([]string, error) {
if s == nil { if s == nil {
return nil, nil return nil, nil
@ -115,24 +117,74 @@ func (s *Secret) TokenPolicies() ([]string, error) {
return nil, nil return nil, nil
} }
var tokenPolicies []string
// Token policies
{
_, ok := s.Data["policies"]
if !ok {
goto TOKEN_DONE
}
sList, ok := s.Data["policies"].([]string) sList, ok := s.Data["policies"].([]string)
if ok { if ok {
return sList, nil tokenPolicies = sList
goto TOKEN_DONE
} }
list, ok := s.Data["policies"].([]interface{}) list, ok := s.Data["policies"].([]interface{})
if !ok { if !ok {
return nil, fmt.Errorf("unable to convert token policies to expected format") return nil, fmt.Errorf("unable to convert token policies to expected format")
} }
for _, v := range list {
policies := make([]string, len(list)) p, ok := v.(string)
for i := range list {
p, ok := list[i].(string)
if !ok { if !ok {
return nil, fmt.Errorf("unable to convert policy %v to string", list[i]) return nil, fmt.Errorf("unable to convert policy %v to string", v)
} }
policies[i] = p tokenPolicies = append(tokenPolicies, p)
} }
}
TOKEN_DONE:
var identityPolicies []string
// Identity policies
{
_, ok := s.Data["identity_policies"]
if !ok {
goto DONE
}
sList, ok := s.Data["identity_policies"].([]string)
if ok {
identityPolicies = sList
goto DONE
}
list, ok := s.Data["identity_policies"].([]interface{})
if !ok {
return nil, fmt.Errorf("unable to convert identity policies to expected format")
}
for _, v := range list {
p, ok := v.(string)
if !ok {
return nil, fmt.Errorf("unable to convert policy %v to string", v)
}
identityPolicies = append(identityPolicies, p)
}
}
DONE:
if s.Auth == nil {
s.Auth = &SecretAuth{}
}
policies := append(tokenPolicies, identityPolicies...)
s.Auth.TokenPolicies = tokenPolicies
s.Auth.IdentityPolicies = identityPolicies
s.Auth.Policies = policies
return policies, nil return policies, nil
} }
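For illustration only: a small sketch of the reworked TokenPolicies behaviour using a hand-built secret; the package and function names are hypothetical.

package example

import "github.com/hashicorp/vault/api"

func policiesExample() ([]string, error) {
	s := &api.Secret{
		Data: map[string]interface{}{
			"policies":          []interface{}{"default", "dev"},
			"identity_policies": []interface{}{"group-readers"},
		},
	}
	// Returns the combined list and, as a side effect, populates
	// s.Auth.TokenPolicies, s.Auth.IdentityPolicies and s.Auth.Policies.
	return s.TokenPolicies()
}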
@ -237,6 +289,8 @@ type SecretAuth struct {
ClientToken string `json:"client_token"` ClientToken string `json:"client_token"`
Accessor string `json:"accessor"` Accessor string `json:"accessor"`
Policies []string `json:"policies"` Policies []string `json:"policies"`
TokenPolicies []string `json:"token_policies"`
IdentityPolicies []string `json:"identity_policies"`
Metadata map[string]string `json:"metadata"` Metadata map[string]string `json:"metadata"`
LeaseDuration int `json:"lease_duration"` LeaseDuration int `json:"lease_duration"`
@ -245,9 +299,20 @@ type SecretAuth struct {
// ParseSecret is used to parse a secret value from JSON from an io.Reader. // ParseSecret is used to parse a secret value from JSON from an io.Reader.
func ParseSecret(r io.Reader) (*Secret, error) { func ParseSecret(r io.Reader) (*Secret, error) {
// First read the data into a buffer. Not super efficient but we want to
// know if we actually have a body or not.
var buf bytes.Buffer
_, err := buf.ReadFrom(r)
if err != nil {
return nil, err
}
if buf.Len() == 0 {
return nil, nil
}
// First decode the JSON into a map[string]interface{} // First decode the JSON into a map[string]interface{}
var secret Secret var secret Secret
if err := jsonutil.DecodeJSONFromReader(r, &secret); err != nil { if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil {
return nil, err return nil, err
} }
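For illustration only: the new empty-body handling in ParseSecret, which is what allows Logical.Write and Logical.Delete above to drop their explicit 200 checks; package and function names are hypothetical.

package example

import (
	"strings"

	"github.com/hashicorp/vault/api"
)

func parseEmptyBody() (*api.Secret, error) {
	// An empty response body now yields (nil, nil) instead of a JSON decode error.
	return api.ParseSecret(strings.NewReader(""))
}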
View File
@ -1,6 +1,9 @@
package api package api
import "fmt" import (
"context"
"fmt"
)
// SSH is used to return a client to invoke operations on SSH backend. // SSH is used to return a client to invoke operations on SSH backend.
type SSH struct { type SSH struct {
@ -28,7 +31,9 @@ func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, err
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -45,7 +50,9 @@ func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error)
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"fmt" "fmt"
@ -13,6 +14,7 @@ import (
"github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/hcl" "github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/helper/hclutil"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
) )
@ -160,7 +162,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
"tls_skip_verify", "tls_skip_verify",
"tls_server_name", "tls_server_name",
} }
if err := checkHCLKeys(list, valid); err != nil { if err := hclutil.CheckHCLKeys(list, valid); err != nil {
return nil, multierror.Prefix(err, "ssh_helper:") return nil, multierror.Prefix(err, "ssh_helper:")
} }
@ -206,7 +208,9 @@ func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -228,30 +232,3 @@ func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) {
} }
return &verifyResp, nil return &verifyResp, nil
} }
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
case *ast.ObjectList:
list = n
case *ast.ObjectType:
list = n.List
default:
return fmt.Errorf("cannot check HCL keys of type %T", n)
}
validMap := make(map[string]struct{}, len(valid))
for _, v := range valid {
validMap[v] = struct{}{}
}
var result error
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
}
}
return result
}
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"fmt" "fmt"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
@ -16,7 +17,9 @@ func (c *Sys) AuditHash(path string, input string) (string, error) {
return "", err return "", err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -37,7 +40,11 @@ func (c *Sys) AuditHash(path string, input string) (string, error) {
func (c *Sys) ListAudit() (map[string]*Audit, error) { func (c *Sys) ListAudit() (map[string]*Audit, error) {
r := c.c.NewRequest("GET", "/v1/sys/audit") r := c.c.NewRequest("GET", "/v1/sys/audit")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -87,7 +94,10 @@ func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) e
return err return err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return err return err
} }
@ -98,7 +108,11 @@ func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) e
func (c *Sys) DisableAudit(path string) error { func (c *Sys) DisableAudit(path string) error {
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path))
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"fmt" "fmt"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
@ -8,7 +9,10 @@ import (
func (c *Sys) ListAuth() (map[string]*AuthMount, error) { func (c *Sys) ListAuth() (map[string]*AuthMount, error) {
r := c.c.NewRequest("GET", "/v1/sys/auth") r := c.c.NewRequest("GET", "/v1/sys/auth")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -56,7 +60,9 @@ func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) err
return err return err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return err return err
} }
@ -67,7 +73,10 @@ func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) err
func (c *Sys) DisableAuth(path string) error { func (c *Sys) DisableAuth(path string) error {
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path)) r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path))
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
View File
@ -1,6 +1,9 @@
package api package api
import "fmt" import (
"context"
"fmt"
)
func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { func (c *Sys) CapabilitiesSelf(path string) ([]string, error) {
return c.Capabilities(c.c.Token(), path) return c.Capabilities(c.c.Token(), path)
@ -22,7 +25,9 @@ func (c *Sys) Capabilities(token, path string) ([]string, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -34,8 +39,14 @@ func (c *Sys) Capabilities(token, path string) ([]string, error) {
return nil, err return nil, err
} }
if result["capabilities"] == nil {
return nil, nil
}
var capabilities []string var capabilities []string
capabilitiesRaw := result["capabilities"].([]interface{}) capabilitiesRaw, ok := result["capabilities"].([]interface{})
if !ok {
return nil, fmt.Errorf("error interpreting returned capabilities")
}
for _, capability := range capabilitiesRaw { for _, capability := range capabilitiesRaw {
capabilities = append(capabilities, capability.(string)) capabilities = append(capabilities, capability.(string))
} }
View File
@ -1,8 +1,13 @@
package api package api
import "context"
func (c *Sys) CORSStatus() (*CORSResponse, error) { func (c *Sys) CORSStatus() (*CORSResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/config/cors") r := c.c.NewRequest("GET", "/v1/sys/config/cors")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -19,7 +24,9 @@ func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -33,7 +40,9 @@ func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) {
func (c *Sys) DisableCORS() (*CORSResponse, error) { func (c *Sys) DisableCORS() (*CORSResponse, error) {
r := c.c.NewRequest("DELETE", "/v1/sys/config/cors") r := c.c.NewRequest("DELETE", "/v1/sys/config/cors")
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -1,5 +1,7 @@
package api package api
import "context"
func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
return c.generateRootStatusCommon("/v1/sys/generate-root/attempt") return c.generateRootStatusCommon("/v1/sys/generate-root/attempt")
} }
@ -10,7 +12,10 @@ func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, err
func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) { func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) {
r := c.c.NewRequest("GET", path) r := c.c.NewRequest("GET", path)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -40,7 +45,9 @@ func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootSta
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -61,7 +68,10 @@ func (c *Sys) GenerateDROperationTokenCancel() error {
func (c *Sys) generateRootCancelCommon(path string) error { func (c *Sys) generateRootCancelCommon(path string) error {
r := c.c.NewRequest("DELETE", path) r := c.c.NewRequest("DELETE", path)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -87,7 +97,9 @@ func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRoot
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -1,5 +1,7 @@
package api package api
import "context"
func (c *Sys) Health() (*HealthResponse, error) { func (c *Sys) Health() (*HealthResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/health") r := c.c.NewRequest("GET", "/v1/sys/health")
// If the code is 400 or above it will automatically turn into an error, // If the code is 400 or above it will automatically turn into an error,
@ -9,7 +11,10 @@ func (c *Sys) Health() (*HealthResponse, error) {
r.Params.Add("sealedcode", "299") r.Params.Add("sealedcode", "299")
r.Params.Add("standbycode", "299") r.Params.Add("standbycode", "299")
r.Params.Add("drsecondarycode", "299") r.Params.Add("drsecondarycode", "299")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -1,8 +1,13 @@
package api package api
import "context"
func (c *Sys) InitStatus() (bool, error) { func (c *Sys) InitStatus() (bool, error) {
r := c.c.NewRequest("GET", "/v1/sys/init") r := c.c.NewRequest("GET", "/v1/sys/init")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -19,7 +24,9 @@ func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -1,8 +1,13 @@
package api package api
import "context"
func (c *Sys) Leader() (*LeaderResponse, error) { func (c *Sys) Leader() (*LeaderResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/leader") r := c.c.NewRequest("GET", "/v1/sys/leader")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
View File
@ -1,5 +1,10 @@
package api package api
import (
"context"
"errors"
)
func (c *Sys) Renew(id string, increment int) (*Secret, error) { func (c *Sys) Renew(id string, increment int) (*Secret, error) {
r := c.c.NewRequest("PUT", "/v1/sys/leases/renew") r := c.c.NewRequest("PUT", "/v1/sys/leases/renew")
@ -11,7 +16,9 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -22,7 +29,10 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) {
func (c *Sys) Revoke(id string) error { func (c *Sys) Revoke(id string) error {
r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id) r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -31,7 +41,10 @@ func (c *Sys) Revoke(id string) error {
func (c *Sys) RevokePrefix(id string) error { func (c *Sys) RevokePrefix(id string) error {
r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id) r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -40,9 +53,53 @@ func (c *Sys) RevokePrefix(id string) error {
func (c *Sys) RevokeForce(id string) error { func (c *Sys) RevokeForce(id string) error {
r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id) r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id)
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
return err return err
} }
func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error {
if opts == nil {
return errors.New("nil options provided")
}
// Construct path
path := "/v1/sys/leases/revoke/"
switch {
case opts.Force:
path = "/v1/sys/leases/revoke-force/"
case opts.Prefix:
path = "/v1/sys/leases/revoke-prefix/"
}
path += opts.LeaseID
r := c.c.NewRequest("PUT", path)
if !opts.Force {
body := map[string]interface{}{
"sync": opts.Sync,
}
if err := r.SetJSONBody(body); err != nil {
return err
}
}
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil {
defer resp.Body.Close()
}
return err
}
type RevokeOptions struct {
LeaseID string
Force bool
Prefix bool
Sync bool
}
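For illustration only: a sketch of the new RevokeWithOptions helper, assuming an initialized *api.Client; the lease prefix is hypothetical.

package example

import "github.com/hashicorp/vault/api"

func revokePrefixExample(client *api.Client) error {
	// Prefix selects /v1/sys/leases/revoke-prefix/, Force would select
	// /v1/sys/leases/revoke-force/ and skips the JSON body entirely.
	return client.Sys().RevokeWithOptions(&api.RevokeOptions{
		LeaseID: "database/creds/readonly", // hypothetical lease prefix
		Prefix:  true,
		Sync:    true,
	})
}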
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"fmt" "fmt"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
@ -8,7 +9,10 @@ import (
func (c *Sys) ListMounts() (map[string]*MountOutput, error) { func (c *Sys) ListMounts() (map[string]*MountOutput, error) {
r := c.c.NewRequest("GET", "/v1/sys/mounts") r := c.c.NewRequest("GET", "/v1/sys/mounts")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -48,7 +52,9 @@ func (c *Sys) Mount(path string, mountInfo *MountInput) error {
return err return err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return err return err
} }
@ -59,7 +65,10 @@ func (c *Sys) Mount(path string, mountInfo *MountInput) error {
func (c *Sys) Unmount(path string) error { func (c *Sys) Unmount(path string) error {
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path))
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -77,7 +86,9 @@ func (c *Sys) Remount(from, to string) error {
return err return err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -90,7 +101,9 @@ func (c *Sys) TuneMount(path string, config MountConfigInput) error {
return err return err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -100,7 +113,9 @@ func (c *Sys) TuneMount(path string, config MountConfigInput) error {
func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) {
r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -128,6 +143,7 @@ type MountInput struct {
type MountConfigInput struct { type MountConfigInput struct {
Options map[string]string `json:"options" mapstructure:"options"` Options map[string]string `json:"options" mapstructure:"options"`
DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
Description *string `json:"description,omitempty" mapstructure:"description"`
MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"`
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
View File
@ -1,6 +1,7 @@
package api package api
import ( import (
"context"
"fmt" "fmt"
"net/http" "net/http"
) )
@ -19,7 +20,10 @@ type ListPluginsResponse struct {
func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) {
path := "/v1/sys/plugins/catalog" path := "/v1/sys/plugins/catalog"
req := c.c.NewRequest("LIST", path) req := c.c.NewRequest("LIST", path)
resp, err := c.c.RawRequest(req)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -54,18 +58,23 @@ type GetPluginResponse struct {
func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) {
path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name) path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
req := c.c.NewRequest(http.MethodGet, path) req := c.c.NewRequest(http.MethodGet, path)
resp, err := c.c.RawRequest(req)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer resp.Body.Close() defer resp.Body.Close()
var result GetPluginResponse var result struct {
Data GetPluginResponse
}
err = resp.DecodeJSON(&result) err = resp.DecodeJSON(&result)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &result, err return &result.Data, err
} }
// RegisterPluginInput is used as input to the RegisterPlugin function. // RegisterPluginInput is used as input to the RegisterPlugin function.
@ -91,7 +100,9 @@ func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error {
return err return err
} }
resp, err := c.c.RawRequest(req) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, req)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -109,7 +120,10 @@ type DeregisterPluginInput struct {
func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error {
path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name) path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
req := c.c.NewRequest(http.MethodDelete, path) req := c.c.NewRequest(http.MethodDelete, path)
resp, err := c.c.RawRequest(req)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, req)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
View File
@ -1,10 +1,16 @@
package api package api
import "fmt" import (
"context"
"fmt"
)
func (c *Sys) ListPolicies() ([]string, error) { func (c *Sys) ListPolicies() ([]string, error) {
r := c.c.NewRequest("GET", "/v1/sys/policy") r := c.c.NewRequest("GET", "/v1/sys/policy")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -33,7 +39,10 @@ func (c *Sys) ListPolicies() ([]string, error) {
func (c *Sys) GetPolicy(name string) (string, error) { func (c *Sys) GetPolicy(name string) (string, error) {
r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name)) r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name))
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if resp != nil { if resp != nil {
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode == 404 { if resp.StatusCode == 404 {
@ -70,7 +79,9 @@ func (c *Sys) PutPolicy(name, rules string) error {
return err return err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return err return err
} }
@ -81,7 +92,10 @@ func (c *Sys) PutPolicy(name, rules string) error {
func (c *Sys) DeletePolicy(name string) error { func (c *Sys) DeletePolicy(name string) error {
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name)) r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name))
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
View File
@ -1,8 +1,13 @@
package api package api
import "context"
func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/rekey/init") r := c.c.NewRequest("GET", "/v1/sys/rekey/init")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -15,7 +20,10 @@ func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) {
func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -26,13 +34,47 @@ func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
return &result, err return &result, err
} }
func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/rekey/verify")
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result RekeyVerificationStatusResponse
err = resp.DecodeJSON(&result)
return &result, err
}
func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify")
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result RekeyVerificationStatusResponse
err = resp.DecodeJSON(&result)
return &result, err
}
func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") r := c.c.NewRequest("PUT", "/v1/sys/rekey/init")
if err := r.SetJSONBody(config); err != nil { if err := r.SetJSONBody(config); err != nil {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -49,7 +91,9 @@ func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusRespon
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -62,7 +106,10 @@ func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusRespon
func (c *Sys) RekeyCancel() error { func (c *Sys) RekeyCancel() error {
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -71,7 +118,34 @@ func (c *Sys) RekeyCancel() error {
func (c *Sys) RekeyRecoveryKeyCancel() error { func (c *Sys) RekeyRecoveryKeyCancel() error {
r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil {
defer resp.Body.Close()
}
return err
}
func (c *Sys) RekeyVerificationCancel() error {
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify")
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil {
defer resp.Body.Close()
}
return err
}
func (c *Sys) RekeyRecoveryKeyVerificationCancel() error {
r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify")
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -89,7 +163,9 @@ func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -111,7 +187,9 @@ func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse,
return nil, err return nil, err
} }
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -124,7 +202,10 @@ func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse,
func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") r := c.c.NewRequest("GET", "/v1/sys/rekey/backup")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -137,7 +218,10 @@ func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) {
func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup") r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -150,7 +234,10 @@ func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) {
func (c *Sys) RekeyDeleteBackup() error { func (c *Sys) RekeyDeleteBackup() error {
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -160,7 +247,10 @@ func (c *Sys) RekeyDeleteBackup() error {
func (c *Sys) RekeyDeleteRecoveryBackup() error { func (c *Sys) RekeyDeleteRecoveryBackup() error {
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup") r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -168,12 +258,61 @@ func (c *Sys) RekeyDeleteRecoveryBackup() error {
return err return err
} }
func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
body := map[string]interface{}{
"key": shard,
"nonce": nonce,
}
r := c.c.NewRequest("PUT", "/v1/sys/rekey/verify")
if err := r.SetJSONBody(body); err != nil {
return nil, err
}
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result RekeyVerificationUpdateResponse
err = resp.DecodeJSON(&result)
return &result, err
}
func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
body := map[string]interface{}{
"key": shard,
"nonce": nonce,
}
r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify")
if err := r.SetJSONBody(body); err != nil {
return nil, err
}
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result RekeyVerificationUpdateResponse
err = resp.DecodeJSON(&result)
return &result, err
}
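
The two verification helpers added above follow the same request pattern as the existing rekey update calls. A hedged usage sketch (illustrative only, not part of this diff; the address, token, shard and nonce values are placeholders):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(&api.Config{Address: "https://127.0.0.1:8200"})
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken(os.Getenv("VAULT_TOKEN"))

	// Shard and nonce would come from the operator and the rekey status response.
	resp, err := client.Sys().RekeyVerificationUpdate("<unseal-shard>", "<verification-nonce>")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("verification complete:", resp.Complete)
}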
type RekeyInitRequest struct { type RekeyInitRequest struct {
SecretShares int `json:"secret_shares"` SecretShares int `json:"secret_shares"`
SecretThreshold int `json:"secret_threshold"` SecretThreshold int `json:"secret_threshold"`
StoredShares int `json:"stored_shares"` StoredShares int `json:"stored_shares"`
PGPKeys []string `json:"pgp_keys"` PGPKeys []string `json:"pgp_keys"`
Backup bool Backup bool
RequireVerification bool `json:"require_verification"`
} }
type RekeyStatusResponse struct { type RekeyStatusResponse struct {
@ -185,6 +324,8 @@ type RekeyStatusResponse struct {
Required int `json:"required"` Required int `json:"required"`
PGPFingerprints []string `json:"pgp_fingerprints"` PGPFingerprints []string `json:"pgp_fingerprints"`
Backup bool `json:"backup"` Backup bool `json:"backup"`
VerificationRequired bool `json:"verification_required"`
VerificationNonce string `json:"verification_nonce"`
} }
type RekeyUpdateResponse struct { type RekeyUpdateResponse struct {
@ -194,6 +335,8 @@ type RekeyUpdateResponse struct {
KeysB64 []string `json:"keys_base64"` KeysB64 []string `json:"keys_base64"`
PGPFingerprints []string `json:"pgp_fingerprints"` PGPFingerprints []string `json:"pgp_fingerprints"`
Backup bool `json:"backup"` Backup bool `json:"backup"`
VerificationRequired bool `json:"verification_required"`
VerificationNonce string `json:"verification_nonce,omitempty"`
} }
type RekeyRetrieveResponse struct { type RekeyRetrieveResponse struct {
@ -201,3 +344,16 @@ type RekeyRetrieveResponse struct {
Keys map[string][]string `json:"keys"` Keys map[string][]string `json:"keys"`
KeysB64 map[string][]string `json:"keys_base64"` KeysB64 map[string][]string `json:"keys_base64"`
} }
type RekeyVerificationStatusResponse struct {
Nonce string `json:"nonce"`
Started bool `json:"started"`
T int `json:"t"`
N int `json:"n"`
Progress int `json:"progress"`
}
type RekeyVerificationUpdateResponse struct {
Nonce string `json:"nonce"`
Complete bool `json:"complete"`
}


@ -1,10 +1,16 @@
package api package api
import "time" import (
"context"
"time"
)
func (c *Sys) Rotate() error { func (c *Sys) Rotate() error {
r := c.c.NewRequest("POST", "/v1/sys/rotate") r := c.c.NewRequest("POST", "/v1/sys/rotate")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -13,7 +19,10 @@ func (c *Sys) Rotate() error {
func (c *Sys) KeyStatus() (*KeyStatus, error) { func (c *Sys) KeyStatus() (*KeyStatus, error) {
r := c.c.NewRequest("GET", "/v1/sys/key-status") r := c.c.NewRequest("GET", "/v1/sys/key-status")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -1,5 +1,7 @@
package api package api
import "context"
func (c *Sys) SealStatus() (*SealStatusResponse, error) { func (c *Sys) SealStatus() (*SealStatusResponse, error) {
r := c.c.NewRequest("GET", "/v1/sys/seal-status") r := c.c.NewRequest("GET", "/v1/sys/seal-status")
return sealStatusRequest(c, r) return sealStatusRequest(c, r)
@ -7,7 +9,10 @@ func (c *Sys) SealStatus() (*SealStatusResponse, error) {
func (c *Sys) Seal() error { func (c *Sys) Seal() error {
r := c.c.NewRequest("PUT", "/v1/sys/seal") r := c.c.NewRequest("PUT", "/v1/sys/seal")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }
@ -37,7 +42,9 @@ func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) {
} }
func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
resp, err := c.c.RawRequest(r) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -1,8 +1,13 @@
package api package api
import "context"
func (c *Sys) StepDown() error { func (c *Sys) StepDown() error {
r := c.c.NewRequest("PUT", "/v1/sys/step-down") r := c.c.NewRequest("PUT", "/v1/sys/step-down")
resp, err := c.c.RawRequest(r)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()
} }


@ -10,6 +10,7 @@ import (
"crypto/sha1" "crypto/sha1"
"crypto/x509" "crypto/x509"
"encoding/pem" "encoding/pem"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"strconv" "strconv"
@ -273,3 +274,28 @@ func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) {
return false, fmt.Errorf("cannot compare key with type %T", key1Iface) return false, fmt.Errorf("cannot compare key with type %T", key1Iface)
} }
} }
// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs
func ParsePublicKeyPEM(data []byte) (interface{}, error) {
block, data := pem.Decode(data)
if block != nil {
var rawKey interface{}
var err error
if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
rawKey = cert.PublicKey
} else {
return nil, err
}
}
if rsaPublicKey, ok := rawKey.(*rsa.PublicKey); ok {
return rsaPublicKey, nil
}
if ecPublicKey, ok := rawKey.(*ecdsa.PublicKey); ok {
return ecPublicKey, nil
}
}
return nil, errors.New("data does not contain any valid RSA or ECDSA public keys")
}
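
A hedged usage sketch for the new helper (not part of this diff; it assumes the vendored import path github.com/hashicorp/vault/helper/certutil, and the PEM file path is a placeholder):

package main

import (
	"crypto/ecdsa"
	"crypto/rsa"
	"io/ioutil"
	"log"

	"github.com/hashicorp/vault/helper/certutil"
)

func main() {
	pemBytes, err := ioutil.ReadFile("/path/to/public-key.pem")
	if err != nil {
		log.Fatal(err)
	}
	key, err := certutil.ParsePublicKeyPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	// The helper returns an interface{}, so callers type-switch on the concrete key type.
	switch key.(type) {
	case *rsa.PublicKey:
		log.Println("parsed an RSA public key")
	case *ecdsa.PublicKey:
		log.Println("parsed an ECDSA public key")
	}
}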


@ -43,7 +43,7 @@ const (
) )
// TLSUsage controls whether the intended usage of a *tls.Config // TLSUsage controls whether the intended usage of a *tls.Config
// returned from ParsedCertBundle.GetTLSConfig is for server use, // returned from ParsedCertBundle.getTLSConfig is for server use,
// client use, or both, which affects which values are set // client use, or both, which affects which values are set
type TLSUsage int type TLSUsage int
@ -523,7 +523,7 @@ func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateK
p.PrivateKeyBytes = privateKeyBytes p.PrivateKeyBytes = privateKeyBytes
} }
// GetTLSConfig returns a TLS config generally suitable for client // getTLSConfig returns a TLS config generally suitable for client
// authentication. The returned TLS config can be modified slightly // authentication. The returned TLS config can be modified slightly
// to be made suitable for a server requiring client authentication; // to be made suitable for a server requiring client authentication;
// specifically, you should set the value of ClientAuth in the returned // specifically, you should set the value of ClientAuth in the returned


@ -0,0 +1,36 @@
package hclutil
import (
"fmt"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl/hcl/ast"
)
// CheckHCLKeys checks whether every key in the AST node is contained in the list of valid keys provided.
func CheckHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
case *ast.ObjectList:
list = n
case *ast.ObjectType:
list = n.List
default:
return fmt.Errorf("cannot check HCL keys of type %T", n)
}
validMap := make(map[string]struct{}, len(valid))
for _, v := range valid {
validMap[v] = struct{}{}
}
var result error
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
}
}
return result
}
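
A hedged usage sketch for CheckHCLKeys (not part of this diff; it assumes the vendored import path github.com/hashicorp/vault/helper/hclutil, and the HCL input and key list are made up):

package main

import (
	"log"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/vault/helper/hclutil"
)

func main() {
	root, err := hcl.Parse("listener \"tcp\" {}\ntelemetry {}")
	if err != nil {
		log.Fatal(err)
	}
	// Only "listener" is allowed here, so the unexpected "telemetry" key is flagged.
	if err := hclutil.CheckHCLKeys(root.Node, []string{"listener"}); err != nil {
		log.Println(err)
	}
}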


@ -28,7 +28,7 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) {
} }
var err error var err error
// Look for a suffix otherwise its a plain second value // Look for a suffix otherwise its a plain second value
if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") { if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") || strings.HasSuffix(inp, "ms") {
dur, err = time.ParseDuration(inp) dur, err = time.ParseDuration(inp)
if err != nil { if err != nil {
return dur, err return dur, err
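
For context, a hedged usage sketch of the duration parser touched above (not part of this diff; it assumes this hunk belongs to the vendored parseutil helper):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/parseutil"
)

func main() {
	for _, in := range []string{"500ms", "90s", "30"} {
		d, err := parseutil.ParseDurationSecond(in)
		if err != nil {
			log.Fatal(err)
		}
		// Suffixed values go through time.ParseDuration; a bare number is read as seconds.
		fmt.Printf("%-6v -> %s\n", in, d)
	}
}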


@ -0,0 +1,9 @@
(The MIT License)
Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -0,0 +1,36 @@
// +build windows
package sequences
import (
"syscall"
"unsafe"
)
var (
kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
)
func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
var mode uint32
err := syscall.GetConsoleMode(syscall.Stdout, &mode)
if err != nil {
return err
}
if enable {
mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
} else {
mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
}
ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
if ret == 0 {
return err
}
return nil
}
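
A hedged usage sketch for the new Windows console helper (not part of this diff; it assumes the vendored import path github.com/konsorten/go-windows-terminal-sequences and only builds on Windows):

// +build windows

package main

import (
	"fmt"
	"log"
	"syscall"

	sequences "github.com/konsorten/go-windows-terminal-sequences"
)

func main() {
	if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
		log.Fatal(err)
	}
	// With virtual terminal processing enabled, ANSI escape codes render instead of printing literally.
	fmt.Println("\x1b[32mgreen text\x1b[0m")
}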


@ -1,13 +1,24 @@
package runewidth package runewidth
import "os"
var ( var (
// EastAsianWidth will be set true if the current locale is CJK // EastAsianWidth will be set true if the current locale is CJK
EastAsianWidth = IsEastAsian() EastAsianWidth bool
// DefaultCondition is a condition in current locale // DefaultCondition is a condition in current locale
DefaultCondition = &Condition{EastAsianWidth} DefaultCondition = &Condition{EastAsianWidth}
) )
func init() {
env := os.Getenv("RUNEWIDTH_EASTASIAN")
if env == "" {
EastAsianWidth = IsEastAsian()
} else {
EastAsianWidth = env == "1"
}
}
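
A hedged usage sketch for the new environment override added in init (not part of this diff; the import path github.com/mattn/go-runewidth is assumed):

package main

import (
	"fmt"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	// Run with RUNEWIDTH_EASTASIAN=1 (or =0) to override the locale-based detection.
	fmt.Println("EastAsianWidth:", runewidth.EastAsianWidth)
	fmt.Println("width of '○':", runewidth.RuneWidth('○')) // an ambiguous-width rune
}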
type interval struct { type interval struct {
first rune first rune
last rune last rune
@ -55,6 +66,7 @@ var private = table{
var nonprint = table{ var nonprint = table{
{0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
{0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
{0x2028, 0x2029},
{0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
} }


@ -141,14 +141,16 @@ func dirWindows() (string, error) {
return home, nil return home, nil
} }
// Prefer standard environment variable USERPROFILE
if home := os.Getenv("USERPROFILE"); home != "" {
return home, nil
}
drive := os.Getenv("HOMEDRIVE") drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH") path := os.Getenv("HOMEPATH")
home := drive + path home := drive + path
if drive == "" || path == "" { if drive == "" || path == "" {
home = os.Getenv("USERPROFILE") return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
}
if home == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
} }
return home, nil return home, nil


@ -2,6 +2,8 @@ package mapstructure
import ( import (
"errors" "errors"
"fmt"
"net"
"reflect" "reflect"
"strconv" "strconv"
"strings" "strings"
@ -115,6 +117,50 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
} }
} }
// StringToIPHookFunc returns a DecodeHookFunc that converts
// strings to net.IP
func StringToIPHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IP{}) {
return data, nil
}
// Convert it by parsing
ip := net.ParseIP(data.(string))
if ip == nil {
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
}
return ip, nil
}
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
// strings to net.IPNet
func StringToIPNetHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IPNet{}) {
return data, nil
}
// Convert it by parsing
_, net, err := net.ParseCIDR(data.(string))
return net, err
}
}
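
A hedged usage sketch for wiring the new hooks into a decoder (not part of this diff; the struct, field names and input are made up):

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/mitchellh/mapstructure"
)

type listenerConfig struct {
	Addr net.IP `mapstructure:"addr"`
}

func main() {
	var out listenerConfig
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToIPHookFunc(),
			mapstructure.StringToIPNetHookFunc(),
		),
		Result: &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(map[string]interface{}{"addr": "10.0.0.1"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Addr) // 10.0.0.1 decoded into a net.IP
}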
// StringToTimeHookFunc returns a DecodeHookFunc that converts // StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time. // strings to time.Time.
func StringToTimeHookFunc(layout string) DecodeHookFunc { func StringToTimeHookFunc(layout string) DecodeHookFunc {


@ -224,16 +224,37 @@ func (d *Decoder) Decode(input interface{}) error {
// Decodes an unknown data type into a specific reflection value. // Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
var inputVal reflect.Value
if input != nil {
inputVal = reflect.ValueOf(input)
// We need to check here if input is a typed nil. Typed nils won't
// match the "input == nil" below so we check that here.
if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
input = nil
}
}
if input == nil { if input == nil {
// If the input is nil, then we don't set anything. // If the data is nil, then we don't set anything, unless ZeroFields is set
// to true.
if d.config.ZeroFields {
outVal.Set(reflect.Zero(outVal.Type()))
if d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
}
return nil return nil
} }
inputVal := reflect.ValueOf(input)
if !inputVal.IsValid() { if !inputVal.IsValid() {
// If the input value is invalid, then we just set the value // If the input value is invalid, then we just set the value
// to be the zero value. // to be the zero value.
outVal.Set(reflect.Zero(outVal.Type())) outVal.Set(reflect.Zero(outVal.Type()))
if d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
return nil return nil
} }
@ -249,8 +270,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
} }
var err error var err error
inputKind := getKind(outVal) outputKind := getKind(outVal)
switch inputKind { switch outputKind {
case reflect.Bool: case reflect.Bool:
err = d.decodeBool(name, input, outVal) err = d.decodeBool(name, input, outVal)
case reflect.Interface: case reflect.Interface:
@ -277,7 +298,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
err = d.decodeFunc(name, input, outVal) err = d.decodeFunc(name, input, outVal)
default: default:
// If we reached this point then we weren't able to decode it // If we reached this point then we weren't able to decode it
return fmt.Errorf("%s: unsupported type: %s", name, inputKind) return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
} }
// If we reached here, then we successfully decoded SOMETHING, so // If we reached here, then we successfully decoded SOMETHING, so
@ -295,7 +316,16 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value)
if val.IsValid() && val.Elem().IsValid() { if val.IsValid() && val.Elem().IsValid() {
return d.decode(name, data, val.Elem()) return d.decode(name, data, val.Elem())
} }
dataVal := reflect.ValueOf(data) dataVal := reflect.ValueOf(data)
// If the input data is a pointer, and the assigned type is the dereference
// of that exact pointer, then indirect it so that we can assign it.
// Example: *string to string
if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
dataVal = reflect.Indirect(dataVal)
}
if !dataVal.IsValid() { if !dataVal.IsValid() {
dataVal = reflect.Zero(val.Type()) dataVal = reflect.Zero(val.Type())
} }
@ -312,7 +342,7 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value)
} }
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data) dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal) dataKind := getKind(dataVal)
converted := true converted := true
@ -364,7 +394,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
} }
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data) dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal) dataKind := getKind(dataVal)
dataType := dataVal.Type() dataType := dataVal.Type()
@ -406,7 +436,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
} }
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data) dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal) dataKind := getKind(dataVal)
switch { switch {
@ -449,7 +479,7 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
} }
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data) dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal) dataKind := getKind(dataVal)
switch { switch {
@ -480,7 +510,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
} }
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data) dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal) dataKind := getKind(dataVal)
dataType := dataVal.Type() dataType := dataVal.Type()
@ -584,6 +614,20 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
// Accumulate errors // Accumulate errors
errors := make([]string, 0) errors := make([]string, 0)
// If the input data is empty, then we just match what the input data is.
if dataVal.Len() == 0 {
if dataVal.IsNil() {
if !val.IsNil() {
val.Set(dataVal)
}
} else {
// Set to empty allocated value
val.Set(valMap)
}
return nil
}
for _, k := range dataVal.MapKeys() { for _, k := range dataVal.MapKeys() {
fieldName := fmt.Sprintf("%s[%s]", name, k) fieldName := fmt.Sprintf("%s[%s]", name, k)
@ -633,16 +677,28 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
} }
tagValue := f.Tag.Get(d.config.TagName)
tagParts := strings.Split(tagValue, ",")
// Determine the name of the key in the map // Determine the name of the key in the map
keyName := f.Name keyName := f.Name
tagValue := f.Tag.Get(d.config.TagName) if tagParts[0] != "" {
tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagParts[0] == "-" {
if tagValue != "" {
if tagValue == "-" {
continue continue
} }
keyName = tagParts[0]
}
keyName = tagValue // If "squash" is specified in the tag, we squash the field down.
squash := false
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
break
}
}
if squash && v.Kind() != reflect.Struct {
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
} }
switch v.Kind() { switch v.Kind() {
@ -662,7 +718,13 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
return err return err
} }
if squash {
for _, k := range vMap.MapKeys() {
valMap.SetMapIndex(k, vMap.MapIndex(k))
}
} else {
valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
}
default: default:
valMap.SetMapIndex(reflect.ValueOf(keyName), v) valMap.SetMapIndex(reflect.ValueOf(keyName), v)
@ -677,11 +739,33 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
} }
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
// If the input data is nil, then we want to just set the output
// pointer to be nil as well.
isNil := data == nil
if !isNil {
switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
case reflect.Chan,
reflect.Func,
reflect.Interface,
reflect.Map,
reflect.Ptr,
reflect.Slice:
isNil = v.IsNil()
}
}
if isNil {
if !val.IsNil() && val.CanSet() {
nilValue := reflect.New(val.Type()).Elem()
val.Set(nilValue)
}
return nil
}
// Create an element of the concrete (non pointer) type and decode // Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type. // into that. Then set the value of the pointer to this type.
valType := val.Type() valType := val.Type()
valElemType := valType.Elem() valElemType := valType.Elem()
if val.CanSet() { if val.CanSet() {
realVal := val realVal := val
if realVal.IsNil() || d.config.ZeroFields { if realVal.IsNil() || d.config.ZeroFields {
@ -723,18 +807,24 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
valSlice := val valSlice := val
if valSlice.IsNil() || d.config.ZeroFields { if valSlice.IsNil() || d.config.ZeroFields {
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
if d.config.WeaklyTypedInput { if d.config.WeaklyTypedInput {
switch { switch {
// Slice and array we use the normal logic
case dataValKind == reflect.Slice, dataValKind == reflect.Array:
break
// Empty maps turn into empty slices // Empty maps turn into empty slices
case dataValKind == reflect.Map: case dataValKind == reflect.Map:
if dataVal.Len() == 0 { if dataVal.Len() == 0 {
val.Set(reflect.MakeSlice(sliceType, 0, 0)) val.Set(reflect.MakeSlice(sliceType, 0, 0))
return nil return nil
} }
// Create slice of maps of other sizes
return d.decodeSlice(name, []interface{}{data}, val)
case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
return d.decodeSlice(name, []byte(dataVal.String()), val) return d.decodeSlice(name, []byte(dataVal.String()), val)
// All other types we try to convert to the slice type // All other types we try to convert to the slice type
// and "lift" it into it. i.e. a string becomes a string slice. // and "lift" it into it. i.e. a string becomes a string slice.
default: default:
@ -742,11 +832,19 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
return d.decodeSlice(name, []interface{}{data}, val) return d.decodeSlice(name, []interface{}{data}, val)
} }
} }
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
return fmt.Errorf( return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind) "'%s': source data must be an array or slice, got %s", name, dataValKind)
} }
// If the input value is empty, then don't allocate since non-nil != nil
if dataVal.Len() == 0 {
return nil
}
// Make a new slice to hold our result, same size as the original data. // Make a new slice to hold our result, same size as the original data.
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
} }
@ -856,10 +954,29 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
} }
dataValKind := dataVal.Kind() dataValKind := dataVal.Kind()
if dataValKind != reflect.Map { switch dataValKind {
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) case reflect.Map:
return d.decodeStructFromMap(name, dataVal, val)
case reflect.Struct:
// Not the most efficient way to do this but we can optimize later if
// we want to. To convert from struct to struct we go to map first
// as an intermediary.
m := make(map[string]interface{})
mval := reflect.Indirect(reflect.ValueOf(&m))
if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
return err
} }
result := d.decodeStructFromMap(name, mval, val)
return result
default:
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
}
}
func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
dataValType := dataVal.Type() dataValType := dataVal.Type()
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
return fmt.Errorf( return fmt.Errorf(


@ -11,10 +11,13 @@ import (
"time" "time"
) )
const tagKeyMultiline = "multiline"
type tomlOpts struct { type tomlOpts struct {
name string name string
comment string comment string
commented bool commented bool
multiline bool
include bool include bool
omitempty bool omitempty bool
} }
@ -187,7 +190,7 @@ func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
// A = [ // A = [
// 1, // 1,
// 2, // 2,
// 3 // 3,
// ] // ]
func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
e.arraysOneElementPerLine = v e.arraysOneElementPerLine = v
@ -230,7 +233,12 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er
if err != nil { if err != nil {
return nil, err return nil, err
} }
tval.SetWithComment(opts.name, opts.comment, opts.commented, val)
tval.SetWithOptions(opts.name, SetOptions{
Comment: opts.comment,
Commented: opts.commented,
Multiline: opts.multiline,
}, val)
} }
} }
case reflect.Map: case reflect.Map:
@ -559,7 +567,8 @@ func tomlOptions(vf reflect.StructField) tomlOpts {
comment = c comment = c
} }
commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false} multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline))
result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false}
if parse[0] != "" { if parse[0] != "" {
if parse[0] == "-" && len(parse) == 1 { if parse[0] == "-" && len(parse) == 1 {
result.include = false result.include = false


@ -14,6 +14,7 @@ type tomlValue struct {
value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
comment string comment string
commented bool commented bool
multiline bool
position Position position Position
} }
@ -175,6 +176,63 @@ func (t *Tree) GetDefault(key string, def interface{}) interface{} {
return val return val
} }
// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
// The default values within the struct are valid default options.
type SetOptions struct {
Comment string
Commented bool
Multiline bool
}
// SetWithOptions is the same as Set, but allows you to provide formatting
// instructions to the key, that will be used by Marshal().
func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
t.SetPathWithOptions(strings.Split(key, "."), opts, value)
}
// SetPathWithOptions is the same as SetPath, but allows you to provide
// formatting instructions to the key, that will be reused by Marshal().
func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
nextTree = newTree()
subtree.values[intermediateKey] = nextTree // add new element here
}
switch node := nextTree.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
// create element if it does not exist
subtree.values[intermediateKey] = append(node, newTree())
}
subtree = node[len(node)-1]
}
}
var toInsert interface{}
switch value.(type) {
case *Tree:
tt := value.(*Tree)
tt.comment = opts.Comment
toInsert = value
case []*Tree:
toInsert = value
case *tomlValue:
tt := value.(*tomlValue)
tt.comment = opts.Comment
toInsert = tt
default:
toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline}
}
subtree.values[keys[len(keys)-1]] = toInsert
}
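
A hedged usage sketch for the new SetWithOptions call (not part of this diff; the keys and values are made up):

package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("[server]\n")
	if err != nil {
		log.Fatal(err)
	}
	tree.SetWithOptions("server.motd", toml.SetOptions{
		Comment:   "shown after login",
		Multiline: true,
	}, "first line\nsecond line")

	out, err := tree.ToTomlString()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out) // motd should be emitted as a """ multi-line string with its comment
}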
// Set an element in the tree. // Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c). // Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediate trees, if needed. // Creates all necessary intermediate trees, if needed.


@ -12,7 +12,41 @@ import (
"time" "time"
) )
// encodes a string to a TOML-compliant string value // Encodes a string to a TOML-compliant multi-line string value
// This function is a clone of the existing encodeTomlString function, except that whitespace characters
// are preserved. Quotation marks and backslashes are also not escaped.
func encodeMultilineTomlString(value string) string {
var b bytes.Buffer
for _, rr := range value {
switch rr {
case '\b':
b.WriteString(`\b`)
case '\t':
b.WriteString("\t")
case '\n':
b.WriteString("\n")
case '\f':
b.WriteString(`\f`)
case '\r':
b.WriteString("\r")
case '"':
b.WriteString(`"`)
case '\\':
b.WriteString(`\`)
default:
intRr := uint16(rr)
if intRr < 0x001F {
b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
} else {
b.WriteRune(rr)
}
}
}
return b.String()
}
// Encodes a string to a TOML-compliant string value
func encodeTomlString(value string) string { func encodeTomlString(value string) string {
var b bytes.Buffer var b bytes.Buffer
@ -45,6 +79,15 @@ func encodeTomlString(value string) string {
} }
func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) {
// this interface check is added to dereference the change made in the writeTo function.
// That change was made to allow this function to see formatting options.
tv, ok := v.(*tomlValue)
if ok {
v = tv.value
} else {
tv = &tomlValue{}
}
switch value := v.(type) { switch value := v.(type) {
case uint64: case uint64:
return strconv.FormatUint(value, 10), nil return strconv.FormatUint(value, 10), nil
@ -58,6 +101,9 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
} }
return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil
case string: case string:
if tv.multiline {
return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil
}
return "\"" + encodeTomlString(value) + "\"", nil return "\"" + encodeTomlString(value) + "\"", nil
case []byte: case []byte:
b, _ := v.([]byte) b, _ := v.([]byte)
@ -91,12 +137,10 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
stringBuffer.WriteString("[\n") stringBuffer.WriteString("[\n")
for i, value := range values { for _, value := range values {
stringBuffer.WriteString(valueIndent) stringBuffer.WriteString(valueIndent)
stringBuffer.WriteString(value) stringBuffer.WriteString(value)
if i != len(values)-1 {
stringBuffer.WriteString(`,`) stringBuffer.WriteString(`,`)
}
stringBuffer.WriteString("\n") stringBuffer.WriteString("\n")
} }
@ -132,7 +176,7 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, a
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
} }
repr, err := tomlValueStringRepresentation(v.value, indent, arraysOneElementPerLine) repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }


@ -30,25 +30,20 @@ func Glob(pattern, subj string) bool {
trailingGlob := strings.HasSuffix(pattern, GLOB) trailingGlob := strings.HasSuffix(pattern, GLOB)
end := len(parts) - 1 end := len(parts) - 1
// Go over the leading parts and ensure they match.
for i := 0; i < end; i++ {
idx := strings.Index(subj, parts[i])
switch i {
case 0:
// Check the first section. Requires special handling. // Check the first section. Requires special handling.
if !leadingGlob && idx != 0 { if !leadingGlob && !strings.HasPrefix(subj, parts[0]) {
return false return false
} }
default:
// Check that the middle parts match. // Go over the middle parts and ensure they match.
if idx < 0 { for i := 1; i < end; i++ {
if !strings.Contains(subj, parts[i]) {
return false return false
} }
}
// Trim evaluated text from subj as we loop over the pattern. // Trim evaluated text from subj as we loop over the pattern.
subj = subj[idx+len(parts[i]):] idx := strings.Index(subj, parts[i]) + len(parts[i])
subj = subj[idx:]
} }
// Reached the last section. Requires special handling. // Reached the last section. Requires special handling.


@ -1,21 +0,0 @@
MIT License
Copyright (c) SendGrid 2016
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,456 +0,0 @@
// Package pester provides additional resiliency over the standard http client methods by
// allowing you to control concurrency, retries, and a backoff strategy.
package pester
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"sync"
"time"
)
//ErrUnexpectedMethod occurs when an http.Client method is unable to be mapped from a calling method in the pester client
var ErrUnexpectedMethod = errors.New("unexpected client method, must be one of Do, Get, Head, Post, or PostFrom")
// ErrReadingBody happens when we cannot read the body bytes
var ErrReadingBody = errors.New("error reading body")
// ErrReadingRequestBody happens when we cannot read the request body bytes
var ErrReadingRequestBody = errors.New("error reading request body")
// Client wraps the http client and exposes all the functionality of the http.Client.
// Additionally, Client provides pester specific values for handling resiliency.
type Client struct {
// wrap it to provide access to http built ins
hc *http.Client
Transport http.RoundTripper
CheckRedirect func(req *http.Request, via []*http.Request) error
Jar http.CookieJar
Timeout time.Duration
// pester specific
Concurrency int
MaxRetries int
Backoff BackoffStrategy
KeepLog bool
LogHook LogHook
SuccessReqNum int
SuccessRetryNum int
wg *sync.WaitGroup
sync.Mutex
ErrLog []ErrEntry
}
// ErrEntry is used to provide the LogString() data and is populated
// each time an error happens if KeepLog is set.
// ErrEntry.Retry is deprecated in favor of ErrEntry.Attempt
type ErrEntry struct {
Time time.Time
Method string
URL string
Verb string
Request int
Retry int
Attempt int
Err error
}
// result simplifies the channel communication for concurrent request handling
type result struct {
resp *http.Response
err error
req int
retry int
}
// params represents all the params needed to run http client calls and pester errors
type params struct {
method string
verb string
req *http.Request
url string
bodyType string
body io.Reader
data url.Values
}
var random *rand.Rand
func init() {
random = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// New constructs a new DefaultClient with sensible default values
func New() *Client {
return &Client{
Concurrency: DefaultClient.Concurrency,
MaxRetries: DefaultClient.MaxRetries,
Backoff: DefaultClient.Backoff,
ErrLog: DefaultClient.ErrLog,
wg: &sync.WaitGroup{},
}
}
// NewExtendedClient allows you to pass in an http.Client that is previously set up
// and extends it to have Pester's features of concurrency and retries.
func NewExtendedClient(hc *http.Client) *Client {
c := New()
c.hc = hc
return c
}
// LogHook is used to log attempts as they happen. This function is never called,
// however, if KeepLog is set to true.
type LogHook func(e ErrEntry)
// BackoffStrategy is used to determine how long a retry request should wait until attempted
type BackoffStrategy func(retry int) time.Duration
// DefaultClient provides sensible defaults
var DefaultClient = &Client{Concurrency: 1, MaxRetries: 3, Backoff: DefaultBackoff, ErrLog: []ErrEntry{}}
// DefaultBackoff always returns 1 second
func DefaultBackoff(_ int) time.Duration {
return 1 * time.Second
}
// ExponentialBackoff returns ever increasing backoffs by a power of 2
func ExponentialBackoff(i int) time.Duration {
return time.Duration(1<<uint(i)) * time.Second
}
// ExponentialJitterBackoff returns ever increasing backoffs by a power of 2
// with +/- 0-33% to prevent sychronized reuqests.
func ExponentialJitterBackoff(i int) time.Duration {
return jitter(int(1 << uint(i)))
}
// LinearBackoff returns increasing durations, each a second longer than the last
func LinearBackoff(i int) time.Duration {
return time.Duration(i) * time.Second
}
// LinearJitterBackoff returns increasing durations, each a second longer than the last
// with +/- 0-33% to prevent sychronized reuqests.
func LinearJitterBackoff(i int) time.Duration {
return jitter(i)
}
// jitter keeps the +/- 0-33% logic in one place
func jitter(i int) time.Duration {
ms := i * 1000
maxJitter := ms / 3
// ms ± rand
ms += random.Intn(2*maxJitter) - maxJitter
// a jitter of 0 messes up the time.Tick chan
if ms <= 0 {
ms = 1
}
return time.Duration(ms) * time.Millisecond
}
// Wait blocks until all pester requests have returned
// Probably not that useful outside of testing.
func (c *Client) Wait() {
c.wg.Wait()
}
// pester provides all the logic of retries, concurrency, backoff, and logging
func (c *Client) pester(p params) (*http.Response, error) {
resultCh := make(chan result)
multiplexCh := make(chan result)
finishCh := make(chan struct{})
// track all requests that go out so we can close the late listener routine that closes late incoming response bodies
totalSentRequests := &sync.WaitGroup{}
totalSentRequests.Add(1)
defer totalSentRequests.Done()
allRequestsBackCh := make(chan struct{})
go func() {
totalSentRequests.Wait()
close(allRequestsBackCh)
}()
// GET calls should be idempotent and can make use
// of concurrency. Other verbs can mutate and should not
// make use of the concurrency feature
concurrency := c.Concurrency
if p.verb != "GET" {
concurrency = 1
}
c.Lock()
if c.hc == nil {
c.hc = &http.Client{}
c.hc.Transport = c.Transport
c.hc.CheckRedirect = c.CheckRedirect
c.hc.Jar = c.Jar
c.hc.Timeout = c.Timeout
}
c.Unlock()
// re-create the http client so we can leverage the std lib
httpClient := http.Client{
Transport: c.hc.Transport,
CheckRedirect: c.hc.CheckRedirect,
Jar: c.hc.Jar,
Timeout: c.hc.Timeout,
}
// if we have a request body, we need to save it for later
var originalRequestBody []byte
var originalBody []byte
var err error
if p.req != nil && p.req.Body != nil {
originalRequestBody, err = ioutil.ReadAll(p.req.Body)
if err != nil {
return nil, ErrReadingRequestBody
}
p.req.Body.Close()
}
if p.body != nil {
originalBody, err = ioutil.ReadAll(p.body)
if err != nil {
return nil, ErrReadingBody
}
}
AttemptLimit := c.MaxRetries
if AttemptLimit <= 0 {
AttemptLimit = 1
}
for req := 0; req < concurrency; req++ {
c.wg.Add(1)
totalSentRequests.Add(1)
go func(n int, p params) {
defer c.wg.Done()
defer totalSentRequests.Done()
var err error
for i := 1; i <= AttemptLimit; i++ {
c.wg.Add(1)
defer c.wg.Done()
select {
case <-finishCh:
return
default:
}
// rehydrate the body (it is drained each read)
if len(originalRequestBody) > 0 {
p.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))
}
if len(originalBody) > 0 {
p.body = bytes.NewBuffer(originalBody)
}
var resp *http.Response
// route the calls
switch p.method {
case "Do":
resp, err = httpClient.Do(p.req)
case "Get":
resp, err = httpClient.Get(p.url)
case "Head":
resp, err = httpClient.Head(p.url)
case "Post":
resp, err = httpClient.Post(p.url, p.bodyType, p.body)
case "PostForm":
resp, err = httpClient.PostForm(p.url, p.data)
default:
err = ErrUnexpectedMethod
}
// Early return if we have a valid result
// Only retry (ie, continue the loop) on 5xx status codes
if err == nil && resp.StatusCode < 500 {
multiplexCh <- result{resp: resp, err: err, req: n, retry: i}
return
}
c.log(ErrEntry{
Time: time.Now(),
Method: p.method,
Verb: p.verb,
URL: p.url,
Request: n,
Retry: i + 1, // would remove, but would break backward compatibility
Attempt: i,
Err: err,
})
// if it is the last iteration, grab the result (which is an error at this point)
if i == AttemptLimit {
multiplexCh <- result{resp: resp, err: err}
return
}
//If the request has been cancelled, skip retries
if p.req != nil {
ctx := p.req.Context()
select {
case <-ctx.Done():
multiplexCh <- result{resp: resp, err: ctx.Err()}
return
default:
}
}
// if we are retrying, we should close this response body to free the fd
if resp != nil {
resp.Body.Close()
}
// prevent a 0 from causing the tick to block, pass additional microsecond
<-time.After(c.Backoff(i) + 1*time.Microsecond)
}
}(req, p)
}
// spin off the go routine so it can continually listen in on late results and close the response bodies
go func() {
gotFirstResult := false
for {
select {
case res := <-multiplexCh:
if !gotFirstResult {
gotFirstResult = true
close(finishCh)
resultCh <- res
} else if res.resp != nil {
// we only return one result to the caller; close all other response bodies that come back
// drain the body before close as to not prevent keepalive. see https://gist.github.com/mholt/eba0f2cc96658be0f717
io.Copy(ioutil.Discard, res.resp.Body)
res.resp.Body.Close()
}
case <-allRequestsBackCh:
// don't leave this goroutine running
return
}
}
}()
res := <-resultCh
c.Lock()
defer c.Unlock()
c.SuccessReqNum = res.req
c.SuccessRetryNum = res.retry
return res.resp, res.err
}
// LogString provides a string representation of the errors the client has seen
func (c *Client) LogString() string {
c.Lock()
defer c.Unlock()
var res string
for _, e := range c.ErrLog {
res += c.FormatError(e)
}
return res
}
// Format the Error to human readable string
func (c *Client) FormatError(e ErrEntry) string {
return fmt.Sprintf("%d %s [%s] %s request-%d retry-%d error: %s\n",
e.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)
}
// LogErrCount is a helper method used primarily for test validation
func (c *Client) LogErrCount() int {
c.Lock()
defer c.Unlock()
return len(c.ErrLog)
}
// EmbedHTTPClient allows you to extend an existing Pester client with an
// underlying http.Client, such as https://godoc.org/golang.org/x/oauth2/google#DefaultClient
func (c *Client) EmbedHTTPClient(hc *http.Client) {
c.hc = hc
}
func (c *Client) log(e ErrEntry) {
if c.KeepLog {
c.Lock()
defer c.Unlock()
c.ErrLog = append(c.ErrLog, e)
} else if c.LogHook != nil {
// NOTE: There is a possibility that Log Printing hook slows it down.
// but the consumer can always do the Job in a go-routine.
c.LogHook(e)
}
}
// Do provides the same functionality as http.Client.Do
func (c *Client) Do(req *http.Request) (resp *http.Response, err error) {
return c.pester(params{method: "Do", req: req, verb: req.Method, url: req.URL.String()})
}
// Get provides the same functionality as http.Client.Get
func (c *Client) Get(url string) (resp *http.Response, err error) {
return c.pester(params{method: "Get", url: url, verb: "GET"})
}
// Head provides the same functionality as http.Client.Head
func (c *Client) Head(url string) (resp *http.Response, err error) {
return c.pester(params{method: "Head", url: url, verb: "HEAD"})
}
// Post provides the same functionality as http.Client.Post
func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
return c.pester(params{method: "Post", url: url, bodyType: bodyType, body: body, verb: "POST"})
}
// PostForm provides the same functionality as http.Client.PostForm
func (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) {
return c.pester(params{method: "PostForm", url: url, data: data, verb: "POST"})
}
////////////////////////////////////////
// Provide self-constructing variants //
////////////////////////////////////////
// Do provides the same functionality as http.Client.Do and creates its own constructor
func Do(req *http.Request) (resp *http.Response, err error) {
c := New()
return c.Do(req)
}
// Get provides the same functionality as http.Client.Get and creates its own constructor
func Get(url string) (resp *http.Response, err error) {
c := New()
return c.Get(url)
}
// Head provides the same functionality as http.Client.Head and creates its own constructor
func Head(url string) (resp *http.Response, err error) {
c := New()
return c.Head(url)
}
// Post provides the same functionality as http.Client.Post and creates its own constructor
func Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
c := New()
return c.Post(url, bodyType, body)
}
// PostForm provides the same functionality as http.Client.PostForm and creates its own constructor
func PostForm(url string, data url.Values) (resp *http.Response, err error) {
c := New()
return c.PostForm(url, data)
}


@ -41,14 +41,14 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic // Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string Message string
// When formatter is called in entry.log(), an Buffer may be set to entry // When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer Buffer *bytes.Buffer
} }
func NewEntry(logger *Logger) *Entry { func NewEntry(logger *Logger) *Entry {
return &Entry{ return &Entry{
Logger: logger, Logger: logger,
// Default is three fields, give a little extra room // Default is five fields, give a little extra room
Data: make(Fields, 5), Data: make(Fields, 5),
} }
} }
@ -83,14 +83,28 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range fields { for k, v := range fields {
data[k] = v data[k] = v
} }
return &Entry{Logger: entry.Logger, Data: data} return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
}
// Overrides the time of the Entry.
func (entry *Entry) WithTime(t time.Time) *Entry {
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
} }
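
A hedged usage sketch for the new time override (not part of this diff; the field names and the replay scenario are made up):

package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	eventTime := time.Now().Add(-2 * time.Hour) // e.g. the original timestamp of a replayed event

	log.WithFields(log.Fields{"source": "replay"}).
		WithTime(eventTime).
		Info("processed archived event")

	// The package-level WithTime added to the exported helpers below works the same way.
	log.WithTime(eventTime).Warn("late delivery")
}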
// This function is not declared with a pointer value because otherwise // This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines // race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) { func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer var buffer *bytes.Buffer
// Default to now, but allow users to override if they want.
//
// We don't have to worry about polluting future calls to Entry#log()
// with this assignment because this function is declared with a
// non-pointer receiver.
if entry.Time.IsZero() {
entry.Time = time.Now() entry.Time = time.Now()
}
entry.Level = level entry.Level = level
entry.Message = msg entry.Message = msg
@ -113,21 +127,19 @@ func (entry Entry) log(level Level, msg string) {
} }
} }
// This function is not declared with a pointer value because otherwise func (entry *Entry) fireHooks() {
// race conditions will occur when using multiple goroutines
func (entry Entry) fireHooks() {
entry.Logger.mu.Lock() entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock() defer entry.Logger.mu.Unlock()
err := entry.Logger.Hooks.Fire(entry.Level, &entry) err := entry.Logger.Hooks.Fire(entry.Level, entry)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
} }
} }
func (entry *Entry) write() { func (entry *Entry) write() {
serialized, err := entry.Logger.Formatter.Format(entry)
entry.Logger.mu.Lock() entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock() defer entry.Logger.mu.Unlock()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
} else { } else {
@ -139,7 +151,7 @@ func (entry *Entry) write() {
} }
func (entry *Entry) Debug(args ...interface{}) { func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.level() >= DebugLevel { if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.log(DebugLevel, fmt.Sprint(args...)) entry.log(DebugLevel, fmt.Sprint(args...))
} }
} }
@ -149,13 +161,13 @@ func (entry *Entry) Print(args ...interface{}) {
} }
func (entry *Entry) Info(args ...interface{}) { func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.level() >= InfoLevel { if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.log(InfoLevel, fmt.Sprint(args...)) entry.log(InfoLevel, fmt.Sprint(args...))
} }
} }
func (entry *Entry) Warn(args ...interface{}) { func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.level() >= WarnLevel { if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.log(WarnLevel, fmt.Sprint(args...)) entry.log(WarnLevel, fmt.Sprint(args...))
} }
} }
@ -165,20 +177,20 @@ func (entry *Entry) Warning(args ...interface{}) {
} }
func (entry *Entry) Error(args ...interface{}) { func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel { if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.log(ErrorLevel, fmt.Sprint(args...)) entry.log(ErrorLevel, fmt.Sprint(args...))
} }
} }
func (entry *Entry) Fatal(args ...interface{}) { func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.level() >= FatalLevel { if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.log(FatalLevel, fmt.Sprint(args...)) entry.log(FatalLevel, fmt.Sprint(args...))
} }
Exit(1) Exit(1)
} }
func (entry *Entry) Panic(args ...interface{}) { func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.level() >= PanicLevel { if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.log(PanicLevel, fmt.Sprint(args...)) entry.log(PanicLevel, fmt.Sprint(args...))
} }
panic(fmt.Sprint(args...)) panic(fmt.Sprint(args...))
@ -187,13 +199,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions // Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) { func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.level() >= DebugLevel { if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(fmt.Sprintf(format, args...)) entry.Debug(fmt.Sprintf(format, args...))
} }
} }
func (entry *Entry) Infof(format string, args ...interface{}) { func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.level() >= InfoLevel { if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.Info(fmt.Sprintf(format, args...)) entry.Info(fmt.Sprintf(format, args...))
} }
} }
@ -203,7 +215,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
} }
func (entry *Entry) Warnf(format string, args ...interface{}) { func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.level() >= WarnLevel { if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.Warn(fmt.Sprintf(format, args...)) entry.Warn(fmt.Sprintf(format, args...))
} }
} }
@ -213,20 +225,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
} }
func (entry *Entry) Errorf(format string, args ...interface{}) { func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.level() >= ErrorLevel { if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.Error(fmt.Sprintf(format, args...)) entry.Error(fmt.Sprintf(format, args...))
} }
} }
func (entry *Entry) Fatalf(format string, args ...interface{}) { func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.level() >= FatalLevel { if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(fmt.Sprintf(format, args...)) entry.Fatal(fmt.Sprintf(format, args...))
} }
Exit(1) Exit(1)
} }
func (entry *Entry) Panicf(format string, args ...interface{}) { func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.level() >= PanicLevel { if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.Panic(fmt.Sprintf(format, args...)) entry.Panic(fmt.Sprintf(format, args...))
} }
} }
@ -234,13 +246,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions // Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) { func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.level() >= DebugLevel { if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(entry.sprintlnn(args...)) entry.Debug(entry.sprintlnn(args...))
} }
} }
func (entry *Entry) Infoln(args ...interface{}) { func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.level() >= InfoLevel { if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.Info(entry.sprintlnn(args...)) entry.Info(entry.sprintlnn(args...))
} }
} }
@ -250,7 +262,7 @@ func (entry *Entry) Println(args ...interface{}) {
} }
func (entry *Entry) Warnln(args ...interface{}) { func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.level() >= WarnLevel { if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.Warn(entry.sprintlnn(args...)) entry.Warn(entry.sprintlnn(args...))
} }
} }
@ -260,20 +272,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
} }
func (entry *Entry) Errorln(args ...interface{}) { func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel { if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.Error(entry.sprintlnn(args...)) entry.Error(entry.sprintlnn(args...))
} }
} }
func (entry *Entry) Fatalln(args ...interface{}) { func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.level() >= FatalLevel { if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(entry.sprintlnn(args...)) entry.Fatal(entry.sprintlnn(args...))
} }
Exit(1) Exit(1)
} }
func (entry *Entry) Panicln(args ...interface{}) { func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.level() >= PanicLevel { if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.Panic(entry.sprintlnn(args...)) entry.Panic(entry.sprintlnn(args...))
} }
} }

@ -2,6 +2,7 @@ package logrus
import ( import (
"io" "io"
"time"
) )
var ( var (
@ -15,37 +16,32 @@ func StandardLogger() *Logger {
// SetOutput sets the standard logger output. // SetOutput sets the standard logger output.
func SetOutput(out io.Writer) { func SetOutput(out io.Writer) {
std.mu.Lock() std.SetOutput(out)
defer std.mu.Unlock()
std.Out = out
} }
// SetFormatter sets the standard logger formatter. // SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) { func SetFormatter(formatter Formatter) {
std.mu.Lock() std.SetFormatter(formatter)
defer std.mu.Unlock()
std.Formatter = formatter
} }
// SetLevel sets the standard logger level. // SetLevel sets the standard logger level.
func SetLevel(level Level) { func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.SetLevel(level) std.SetLevel(level)
} }
// GetLevel returns the standard logger level. // GetLevel returns the standard logger level.
func GetLevel() Level { func GetLevel() Level {
std.mu.Lock() return std.GetLevel()
defer std.mu.Unlock() }
return std.level()
// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
func IsLevelEnabled(level Level) bool {
return std.IsLevelEnabled(level)
} }
// AddHook adds a hook to the standard logger hooks. // AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) { func AddHook(hook Hook) {
std.mu.Lock() std.AddHook(hook)
defer std.mu.Unlock()
std.Hooks.Add(hook)
} }
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
@ -72,6 +68,15 @@ func WithFields(fields Fields) *Entry {
return std.WithFields(fields) return std.WithFields(fields)
} }
// WithTime creates an entry from the standard logger and overrides the time of
// logs generated with it.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithTime(t time.Time) *Entry {
return std.WithTime(t)
}
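The package-level helpers above now delegate to the standard logger's own thread-safe methods. A minimal usage sketch of the updated API, assuming the usual import path and arbitrary values:

    package main

    import (
        "os"
        "time"

        log "github.com/sirupsen/logrus"
    )

    func main() {
        log.SetOutput(os.Stdout)     // delegates to std.SetOutput
        log.SetLevel(log.DebugLevel) // delegates to std.SetLevel
        if log.IsLevelEnabled(log.DebugLevel) {
            // WithTime only builds the entry; nothing is logged until Debug is called.
            log.WithTime(time.Now().Add(-time.Minute)).Debug("backdated entry")
        }
    }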
// Debug logs a message at level Debug on the standard logger. // Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) { func Debug(args ...interface{}) {
std.Debug(args...) std.Debug(args...)
@ -107,7 +112,7 @@ func Panic(args ...interface{}) {
std.Panic(args...) std.Panic(args...)
} }
// Fatal logs a message at level Fatal on the standard logger. // Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatal(args ...interface{}) { func Fatal(args ...interface{}) {
std.Fatal(args...) std.Fatal(args...)
} }
@ -147,7 +152,7 @@ func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...) std.Panicf(format, args...)
} }
// Fatalf logs a message at level Fatal on the standard logger. // Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) { func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...) std.Fatalf(format, args...)
} }
@ -187,7 +192,7 @@ func Panicln(args ...interface{}) {
std.Panicln(args...) std.Panicln(args...)
} }
// Fatalln logs a message at level Fatal on the standard logger. // Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalln(args ...interface{}) { func Fatalln(args ...interface{}) {
std.Fatalln(args...) std.Fatalln(args...)
} }

@ -30,16 +30,22 @@ type Formatter interface {
// //
// It's not exported because it's still using Data in an opinionated way. It's to // It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters. // avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) { func prefixFieldClashes(data Fields, fieldMap FieldMap) {
if t, ok := data["time"]; ok { timeKey := fieldMap.resolve(FieldKeyTime)
data["fields.time"] = t if t, ok := data[timeKey]; ok {
data["fields."+timeKey] = t
delete(data, timeKey)
} }
if m, ok := data["msg"]; ok { msgKey := fieldMap.resolve(FieldKeyMsg)
data["fields.msg"] = m if m, ok := data[msgKey]; ok {
data["fields."+msgKey] = m
delete(data, msgKey)
} }
if l, ok := data["level"]; ok { levelKey := fieldMap.resolve(FieldKeyLevel)
data["fields.level"] = l if l, ok := data[levelKey]; ok {
data["fields."+levelKey] = l
delete(data, levelKey)
} }
} }

@ -1,6 +1,7 @@
package logrus package logrus
import ( import (
"bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
) )
@ -33,6 +34,9 @@ type JSONFormatter struct {
// DisableTimestamp allows disabling automatic timestamps in output // DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool DisableTimestamp bool
// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
DataKey string
// FieldMap allows users to customize the names of keys for default fields. // FieldMap allows users to customize the names of keys for default fields.
// As an example: // As an example:
// formatter := &JSONFormatter{ // formatter := &JSONFormatter{
@ -43,6 +47,9 @@ type JSONFormatter struct {
// }, // },
// } // }
FieldMap FieldMap FieldMap FieldMap
// PrettyPrint will indent all json logs
PrettyPrint bool
} }
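The two new JSONFormatter options can be combined; a brief sketch, with the DataKey value "payload" chosen purely for illustration:

    logger := logrus.New()
    logger.SetFormatter(&logrus.JSONFormatter{
        DataKey:     "payload", // nest all user fields under "payload"
        PrettyPrint: true,      // indent the JSON output
    })
    logger.WithField("user", "alice").Info("login")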
// Format renders a single log entry // Format renders a single log entry
@ -58,7 +65,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data[k] = v data[k] = v
} }
} }
prefixFieldClashes(data)
if f.DataKey != "" {
newData := make(Fields, 4)
newData[f.DataKey] = data
data = newData
}
prefixFieldClashes(data, f.FieldMap)
timestampFormat := f.TimestampFormat timestampFormat := f.TimestampFormat
if timestampFormat == "" { if timestampFormat == "" {
@ -71,9 +85,20 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data) var b *bytes.Buffer
if err != nil { if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
encoder := json.NewEncoder(b)
if f.PrettyPrint {
encoder.SetIndent("", " ")
}
if err := encoder.Encode(data); err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
} }
return append(serialized, '\n'), nil
return b.Bytes(), nil
} }

@ -5,12 +5,13 @@ import (
"os" "os"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time"
) )
type Logger struct { type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to // file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventorous, such as logging to Kafka. // something more adventurous, such as logging to Kafka.
Out io.Writer Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging // Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking // levels and log entries. For example, to send errors to an error tracking
@ -84,11 +85,12 @@ func (logger *Logger) newEntry() *Entry {
} }
func (logger *Logger) releaseEntry(entry *Entry) { func (logger *Logger) releaseEntry(entry *Entry) {
entry.Data = map[string]interface{}{}
logger.entryPool.Put(entry) logger.entryPool.Put(entry)
} }
// Adds a field to the log entry, note that it doesn't log until you call // Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. // Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`. // If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry { func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry() entry := logger.newEntry()
@ -112,8 +114,15 @@ func (logger *Logger) WithError(err error) *Entry {
return entry.WithError(err) return entry.WithError(err)
} }
// Overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithTime(t)
}
func (logger *Logger) Debugf(format string, args ...interface{}) { func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel { if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Debugf(format, args...) entry.Debugf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -121,7 +130,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
} }
func (logger *Logger) Infof(format string, args ...interface{}) { func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.level() >= InfoLevel { if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Infof(format, args...) entry.Infof(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -135,7 +144,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
} }
func (logger *Logger) Warnf(format string, args ...interface{}) { func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.level() >= WarnLevel { if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Warnf(format, args...) entry.Warnf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -143,7 +152,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
} }
func (logger *Logger) Warningf(format string, args ...interface{}) { func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.level() >= WarnLevel { if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Warnf(format, args...) entry.Warnf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -151,7 +160,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
} }
func (logger *Logger) Errorf(format string, args ...interface{}) { func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.level() >= ErrorLevel { if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Errorf(format, args...) entry.Errorf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -159,7 +168,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
} }
func (logger *Logger) Fatalf(format string, args ...interface{}) { func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.level() >= FatalLevel { if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Fatalf(format, args...) entry.Fatalf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -168,7 +177,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
} }
func (logger *Logger) Panicf(format string, args ...interface{}) { func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.level() >= PanicLevel { if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Panicf(format, args...) entry.Panicf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -176,7 +185,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
} }
func (logger *Logger) Debug(args ...interface{}) { func (logger *Logger) Debug(args ...interface{}) {
if logger.level() >= DebugLevel { if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Debug(args...) entry.Debug(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -184,7 +193,7 @@ func (logger *Logger) Debug(args ...interface{}) {
} }
func (logger *Logger) Info(args ...interface{}) { func (logger *Logger) Info(args ...interface{}) {
if logger.level() >= InfoLevel { if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Info(args...) entry.Info(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -198,7 +207,7 @@ func (logger *Logger) Print(args ...interface{}) {
} }
func (logger *Logger) Warn(args ...interface{}) { func (logger *Logger) Warn(args ...interface{}) {
if logger.level() >= WarnLevel { if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Warn(args...) entry.Warn(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -206,7 +215,7 @@ func (logger *Logger) Warn(args ...interface{}) {
} }
func (logger *Logger) Warning(args ...interface{}) { func (logger *Logger) Warning(args ...interface{}) {
if logger.level() >= WarnLevel { if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Warn(args...) entry.Warn(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -214,7 +223,7 @@ func (logger *Logger) Warning(args ...interface{}) {
} }
func (logger *Logger) Error(args ...interface{}) { func (logger *Logger) Error(args ...interface{}) {
if logger.level() >= ErrorLevel { if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Error(args...) entry.Error(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -222,7 +231,7 @@ func (logger *Logger) Error(args ...interface{}) {
} }
func (logger *Logger) Fatal(args ...interface{}) { func (logger *Logger) Fatal(args ...interface{}) {
if logger.level() >= FatalLevel { if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Fatal(args...) entry.Fatal(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -231,7 +240,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
} }
func (logger *Logger) Panic(args ...interface{}) { func (logger *Logger) Panic(args ...interface{}) {
if logger.level() >= PanicLevel { if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Panic(args...) entry.Panic(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -239,7 +248,7 @@ func (logger *Logger) Panic(args ...interface{}) {
} }
func (logger *Logger) Debugln(args ...interface{}) { func (logger *Logger) Debugln(args ...interface{}) {
if logger.level() >= DebugLevel { if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Debugln(args...) entry.Debugln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -247,7 +256,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
} }
func (logger *Logger) Infoln(args ...interface{}) { func (logger *Logger) Infoln(args ...interface{}) {
if logger.level() >= InfoLevel { if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Infoln(args...) entry.Infoln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -261,7 +270,7 @@ func (logger *Logger) Println(args ...interface{}) {
} }
func (logger *Logger) Warnln(args ...interface{}) { func (logger *Logger) Warnln(args ...interface{}) {
if logger.level() >= WarnLevel { if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Warnln(args...) entry.Warnln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -269,7 +278,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
} }
func (logger *Logger) Warningln(args ...interface{}) { func (logger *Logger) Warningln(args ...interface{}) {
if logger.level() >= WarnLevel { if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Warnln(args...) entry.Warnln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -277,7 +286,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
} }
func (logger *Logger) Errorln(args ...interface{}) { func (logger *Logger) Errorln(args ...interface{}) {
if logger.level() >= ErrorLevel { if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Errorln(args...) entry.Errorln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -285,7 +294,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
} }
func (logger *Logger) Fatalln(args ...interface{}) { func (logger *Logger) Fatalln(args ...interface{}) {
if logger.level() >= FatalLevel { if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Fatalln(args...) entry.Fatalln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -294,7 +303,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
} }
func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) Panicln(args ...interface{}) {
if logger.level() >= PanicLevel { if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry() entry := logger.newEntry()
entry.Panicln(args...) entry.Panicln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
@ -312,12 +321,47 @@ func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level))) return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
} }
// SetLevel sets the logger level.
func (logger *Logger) SetLevel(level Level) { func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
} }
// GetLevel returns the logger level.
func (logger *Logger) GetLevel() Level {
return logger.level()
}
// AddHook adds a hook to the logger hooks.
func (logger *Logger) AddHook(hook Hook) { func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock() logger.mu.Lock()
defer logger.mu.Unlock() defer logger.mu.Unlock()
logger.Hooks.Add(hook) logger.Hooks.Add(hook)
} }
// IsLevelEnabled checks if the log level of the logger is greater than the level param
func (logger *Logger) IsLevelEnabled(level Level) bool {
return logger.level() >= level
}
// SetFormatter sets the logger formatter.
func (logger *Logger) SetFormatter(formatter Formatter) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Formatter = formatter
}
// SetOutput sets the logger output.
func (logger *Logger) SetOutput(output io.Writer) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Out = output
}
// ReplaceHooks replaces the logger hooks and returns the old ones
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
logger.mu.Lock()
oldHooks := logger.Hooks
logger.Hooks = hooks
logger.mu.Unlock()
return oldHooks
}
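Taken together, the methods added here let callers configure a Logger instance without reaching into its fields; a rough sketch with arbitrary values:

    l := logrus.New()
    l.SetOutput(os.Stderr)
    l.SetFormatter(&logrus.TextFormatter{})
    l.SetLevel(logrus.WarnLevel)
    if l.IsLevelEnabled(logrus.WarnLevel) {
        l.Warn("disk usage above threshold")
    }
    oldHooks := l.ReplaceHooks(make(logrus.LevelHooks)) // detach all hooks, keep the previous set
    _ = oldHooks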

@ -140,4 +140,11 @@ type FieldLogger interface {
Errorln(args ...interface{}) Errorln(args ...interface{})
Fatalln(args ...interface{}) Fatalln(args ...interface{})
Panicln(args ...interface{}) Panicln(args ...interface{})
// IsDebugEnabled() bool
// IsInfoEnabled() bool
// IsWarnEnabled() bool
// IsErrorEnabled() bool
// IsFatalEnabled() bool
// IsPanicEnabled() bool
} }

@ -0,0 +1,13 @@
// Based on ssh/terminal:
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
package logrus
import "io"
func initTerminal(w io.Writer) {
}

@ -1,10 +1,17 @@
// +build darwin freebsd openbsd netbsd dragonfly // +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine,!gopherjs // +build !appengine,!js
package logrus package logrus
import "golang.org/x/sys/unix" import (
"io"
"golang.org/x/sys/unix"
)
const ioctlReadTermios = unix.TIOCGETA const ioctlReadTermios = unix.TIOCGETA
type Termios unix.Termios type Termios unix.Termios
func initTerminal(w io.Writer) {
}

@ -1,4 +1,4 @@
// +build appengine gopherjs // +build appengine
package logrus package logrus

vendor/github.com/sirupsen/logrus/terminal_check_js.go (generated vendored, new file, +11 lines)
@ -0,0 +1,11 @@
// +build js
package logrus
import (
"io"
)
func checkIfTerminal(w io.Writer) bool {
return false
}

@ -1,4 +1,4 @@
// +build !appengine,!gopherjs // +build !appengine,!js,!windows
package logrus package logrus

@ -0,0 +1,20 @@
// +build !appengine,!js,windows
package logrus
import (
"io"
"os"
"syscall"
)
func checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
var mode uint32
err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
return err == nil
default:
return false
}
}

@ -3,12 +3,19 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !appengine,!gopherjs // +build !appengine,!js
package logrus package logrus
import "golang.org/x/sys/unix" import (
"io"
"golang.org/x/sys/unix"
)
const ioctlReadTermios = unix.TCGETS const ioctlReadTermios = unix.TCGETS
type Termios unix.Termios type Termios unix.Termios
func initTerminal(w io.Writer) {
}

vendor/github.com/sirupsen/logrus/terminal_windows.go (generated vendored, new file, +18 lines)
@ -0,0 +1,18 @@
// +build !appengine,!js,windows
package logrus
import (
"io"
"os"
"syscall"
sequences "github.com/konsorten/go-windows-terminal-sequences"
)
func initTerminal(w io.Writer) {
switch v := w.(type) {
case *os.File:
sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
}
}

@ -3,6 +3,7 @@ package logrus
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"os"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -20,6 +21,7 @@ const (
var ( var (
baseTimestamp time.Time baseTimestamp time.Time
emptyFieldMap FieldMap
) )
func init() { func init() {
@ -34,6 +36,9 @@ type TextFormatter struct {
// Force disabling colors. // Force disabling colors.
DisableColors bool DisableColors bool
// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
EnvironmentOverrideColors bool
// Disable timestamp logging. useful when output is redirected to logging // Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps. // system that already adds timestamps.
DisableTimestamp bool DisableTimestamp bool
@ -50,60 +55,119 @@ type TextFormatter struct {
// be desired. // be desired.
DisableSorting bool DisableSorting bool
// The keys sorting function, when uninitialized it uses sort.Strings.
SortingFunc func([]string)
// Disables the truncation of the level text to 4 characters.
DisableLevelTruncation bool
// QuoteEmptyFields will wrap empty fields in quotes if true // QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool QuoteEmptyFields bool
// Whether the logger's out is to a terminal // Whether the logger's out is to a terminal
isTerminal bool isTerminal bool
sync.Once // FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &TextFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message"}}
FieldMap FieldMap
terminalInitOnce sync.Once
} }
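A hedged example of the new TextFormatter options; the FieldMap key shown and the overall configuration are illustrative only, and the "sort" package plus logrus are assumed to be imported:

    f := &logrus.TextFormatter{
        DisableLevelTruncation:    true,         // print the full "WARNING" instead of the truncated "WARN"
        EnvironmentOverrideColors: true,         // honor CLICOLOR / CLICOLOR_FORCE
        SortingFunc:               sort.Strings, // any func([]string) works here
        FieldMap: logrus.FieldMap{
            logrus.FieldKeyTime: "@timestamp",
        },
    }
    logrus.SetFormatter(f)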
func (f *TextFormatter) init(entry *Entry) { func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil { if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out) f.isTerminal = checkIfTerminal(entry.Logger.Out)
if f.isTerminal {
initTerminal(entry.Logger.Out)
} }
}
}
func (f *TextFormatter) isColored() bool {
isColored := f.ForceColors || f.isTerminal
if f.EnvironmentOverrideColors {
if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
isColored = true
} else if ok && force == "0" {
isColored = false
} else if os.Getenv("CLICOLOR") == "0" {
isColored = false
}
}
return isColored && !f.DisableColors
} }
// Format renders a single log entry // Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer prefixFieldClashes(entry.Data, f.FieldMap)
keys := make([]string, 0, len(entry.Data)) keys := make([]string, 0, len(entry.Data))
for k := range entry.Data { for k := range entry.Data {
keys = append(keys, k) keys = append(keys, k)
} }
if !f.DisableSorting { fixedKeys := make([]string, 0, 3+len(entry.Data))
sort.Strings(keys) if !f.DisableTimestamp {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
} }
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
if entry.Message != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
}
if !f.DisableSorting {
if f.SortingFunc == nil {
sort.Strings(keys)
fixedKeys = append(fixedKeys, keys...)
} else {
if !f.isColored() {
fixedKeys = append(fixedKeys, keys...)
f.SortingFunc(fixedKeys)
} else {
f.SortingFunc(keys)
}
}
} else {
fixedKeys = append(fixedKeys, keys...)
}
var b *bytes.Buffer
if entry.Buffer != nil { if entry.Buffer != nil {
b = entry.Buffer b = entry.Buffer
} else { } else {
b = &bytes.Buffer{} b = &bytes.Buffer{}
} }
prefixFieldClashes(entry.Data) f.terminalInitOnce.Do(func() { f.init(entry) })
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat timestampFormat := f.TimestampFormat
if timestampFormat == "" { if timestampFormat == "" {
timestampFormat = defaultTimestampFormat timestampFormat = defaultTimestampFormat
} }
if isColored { if f.isColored() {
f.printColored(b, entry, keys, timestampFormat) f.printColored(b, entry, keys, timestampFormat)
} else { } else {
if !f.DisableTimestamp { for _, key := range fixedKeys {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) var value interface{}
switch key {
case f.FieldMap.resolve(FieldKeyTime):
value = entry.Time.Format(timestampFormat)
case f.FieldMap.resolve(FieldKeyLevel):
value = entry.Level.String()
case f.FieldMap.resolve(FieldKeyMsg):
value = entry.Message
default:
value = entry.Data[key]
} }
f.appendKeyValue(b, "level", entry.Level.String()) f.appendKeyValue(b, key, value)
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
} }
} }
@ -124,7 +188,14 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelColor = blue levelColor = blue
} }
levelText := strings.ToUpper(entry.Level.String())[0:4] levelText := strings.ToUpper(entry.Level.String())
if !f.DisableLevelTruncation {
levelText = levelText[0:4]
}
// Remove a single newline if it already exists in the message to keep
// the behavior of logrus text_formatter the same as the stdlib log package
entry.Message = strings.TrimSuffix(entry.Message, "\n")
if f.DisableTimestamp { if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)

@ -131,6 +131,9 @@ func (f *File) Sync() error {
} }
func (f *File) Readdir(count int) (res []os.FileInfo, err error) { func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
if !f.fileData.dir {
return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
}
var outLength int64 var outLength int64
f.fileData.Lock() f.fileData.Lock()

@ -1,6 +1,7 @@
package pflag package pflag
import ( import (
"encoding/base64"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"strings" "strings"
@ -9,10 +10,12 @@ import (
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded // BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
type bytesHexValue []byte type bytesHexValue []byte
// String implements pflag.Value.String.
func (bytesHex bytesHexValue) String() string { func (bytesHex bytesHexValue) String() string {
return fmt.Sprintf("%X", []byte(bytesHex)) return fmt.Sprintf("%X", []byte(bytesHex))
} }
// Set implements pflag.Value.Set.
func (bytesHex *bytesHexValue) Set(value string) error { func (bytesHex *bytesHexValue) Set(value string) error {
bin, err := hex.DecodeString(strings.TrimSpace(value)) bin, err := hex.DecodeString(strings.TrimSpace(value))
@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error {
return nil return nil
} }
// Type implements pflag.Value.Type.
func (*bytesHexValue) Type() string { func (*bytesHexValue) Type() string {
return "bytesHex" return "bytesHex"
} }
@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte {
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesHexP(name, shorthand, value, usage) return CommandLine.BytesHexP(name, shorthand, value, usage)
} }
// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
type bytesBase64Value []byte
// String implements pflag.Value.String.
func (bytesBase64 bytesBase64Value) String() string {
return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
}
// Set implements pflag.Value.Set.
func (bytesBase64 *bytesBase64Value) Set(value string) error {
bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
if err != nil {
return err
}
*bytesBase64 = bin
return nil
}
// Type implements pflag.Value.Type.
func (*bytesBase64Value) Type() string {
return "bytesBase64"
}
func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
*p = val
return (*bytesBase64Value)(p)
}
func bytesBase64ValueConv(sval string) (interface{}, error) {
bin, err := base64.StdEncoding.DecodeString(sval)
if err == nil {
return bin, nil
}
return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
}
// GetBytesBase64 return the []byte value of a flag with the given name
func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
if err != nil {
return []byte{}, err
}
return val.([]byte), nil
}
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
// The argument p points to an []byte variable in which to store the value of the flag.
func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
f.VarP(newBytesBase64Value(value, p), name, "", usage)
}
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
}
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
// The argument p points to an []byte variable in which to store the value of the flag.
func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
}
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
}
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
// The return value is the address of an []byte variable that stores the value of the flag.
func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
p := new([]byte)
f.BytesBase64VarP(p, name, "", value, usage)
return p
}
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
p := new([]byte)
f.BytesBase64VarP(p, name, shorthand, value, usage)
return p
}
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
// The return value is the address of an []byte variable that stores the value of the flag.
func BytesBase64(name string, value []byte, usage string) *[]byte {
return CommandLine.BytesBase64P(name, "", value, usage)
}
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesBase64P(name, shorthand, value, usage)
}
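A rough usage sketch for the new base64 flag type; the flag name and shorthand are made up:

    var payload []byte
    pflag.BytesBase64VarP(&payload, "payload", "p", nil, "base64-encoded payload")
    pflag.Parse()
    // --payload aGVsbG8=  =>  payload == []byte("hello")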

@ -925,13 +925,16 @@ func stripUnknownFlagValue(args []string) []string {
} }
first := args[0] first := args[0]
if first[0] == '-' { if len(first) > 0 && first[0] == '-' {
//--unknown --next-flag ... //--unknown --next-flag ...
return args return args
} }
//--unknown arg ... (args will be arg ...) //--unknown arg ... (args will be arg ...)
if len(args) > 1 {
return args[1:] return args[1:]
}
return nil
} }
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
@ -990,11 +993,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
} }
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
outArgs = args
if strings.HasPrefix(shorthands, "test.") { if strings.HasPrefix(shorthands, "test.") {
return return
} }
outArgs = args
outShorts = shorthands[1:] outShorts = shorthands[1:]
c := shorthands[0] c := shorthands[0]

vendor/github.com/spf13/pflag/string_to_int.go (generated vendored, new file, +149 lines)
@ -0,0 +1,149 @@
package pflag
import (
"bytes"
"fmt"
"strconv"
"strings"
)
// -- stringToInt Value
type stringToIntValue struct {
value *map[string]int
changed bool
}
func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
ssv := new(stringToIntValue)
ssv.value = p
*ssv.value = val
return ssv
}
// Format: a=1,b=2
func (s *stringToIntValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make(map[string]int, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return fmt.Errorf("%s must be formatted as key=value", pair)
}
var err error
out[kv[0]], err = strconv.Atoi(kv[1])
if err != nil {
return err
}
}
if !s.changed {
*s.value = out
} else {
for k, v := range out {
(*s.value)[k] = v
}
}
s.changed = true
return nil
}
func (s *stringToIntValue) Type() string {
return "stringToInt"
}
func (s *stringToIntValue) String() string {
var buf bytes.Buffer
i := 0
for k, v := range *s.value {
if i > 0 {
buf.WriteRune(',')
}
buf.WriteString(k)
buf.WriteRune('=')
buf.WriteString(strconv.Itoa(v))
i++
}
return "[" + buf.String() + "]"
}
func stringToIntConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// An empty string would cause an empty map
if len(val) == 0 {
return map[string]int{}, nil
}
ss := strings.Split(val, ",")
out := make(map[string]int, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("%s must be formatted as key=value", pair)
}
var err error
out[kv[0]], err = strconv.Atoi(kv[1])
if err != nil {
return nil, err
}
}
return out, nil
}
// GetStringToInt return the map[string]int value of a flag with the given name
func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
if err != nil {
return map[string]int{}, err
}
return val.(map[string]int), nil
}
// StringToIntVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
f.VarP(newStringToIntValue(value, p), name, "", usage)
}
// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
}
// StringToIntVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]int variable in which to store the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
}
// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
}
// StringToInt defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]int variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
p := map[string]int{}
f.StringToIntVarP(&p, name, "", value, usage)
return &p
}
// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
p := map[string]int{}
f.StringToIntVarP(&p, name, shorthand, value, usage)
return &p
}
// StringToInt defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]int variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToInt(name string, value map[string]int, usage string) *map[string]int {
return CommandLine.StringToIntP(name, "", value, usage)
}
// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
return CommandLine.StringToIntP(name, shorthand, value, usage)
}
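Assumed usage of the new stringToInt flag; note that Set merges values when the flag is repeated:

    limits := pflag.StringToInt("limits", map[string]int{}, "key=value pairs")
    pflag.Parse()
    // --limits cpu=4,mem=8 --limits gpu=1  =>  map[cpu:4 mem:8 gpu:1]
    fmt.Println(*limits)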

vendor/github.com/spf13/pflag/string_to_string.go (generated vendored, new file, +160 lines)
@ -0,0 +1,160 @@
package pflag
import (
"bytes"
"encoding/csv"
"fmt"
"strings"
)
// -- stringToString Value
type stringToStringValue struct {
value *map[string]string
changed bool
}
func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
ssv := new(stringToStringValue)
ssv.value = p
*ssv.value = val
return ssv
}
// Format: a=1,b=2
func (s *stringToStringValue) Set(val string) error {
var ss []string
n := strings.Count(val, "=")
switch n {
case 0:
return fmt.Errorf("%s must be formatted as key=value", val)
case 1:
ss = append(ss, strings.Trim(val, `"`))
default:
r := csv.NewReader(strings.NewReader(val))
var err error
ss, err = r.Read()
if err != nil {
return err
}
}
out := make(map[string]string, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return fmt.Errorf("%s must be formatted as key=value", pair)
}
out[kv[0]] = kv[1]
}
if !s.changed {
*s.value = out
} else {
for k, v := range out {
(*s.value)[k] = v
}
}
s.changed = true
return nil
}
func (s *stringToStringValue) Type() string {
return "stringToString"
}
func (s *stringToStringValue) String() string {
records := make([]string, 0, len(*s.value)>>1)
for k, v := range *s.value {
records = append(records, k+"="+v)
}
var buf bytes.Buffer
w := csv.NewWriter(&buf)
if err := w.Write(records); err != nil {
panic(err)
}
w.Flush()
return "[" + strings.TrimSpace(buf.String()) + "]"
}
func stringToStringConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// An empty string would cause an empty map
if len(val) == 0 {
return map[string]string{}, nil
}
r := csv.NewReader(strings.NewReader(val))
ss, err := r.Read()
if err != nil {
return nil, err
}
out := make(map[string]string, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("%s must be formatted as key=value", pair)
}
out[kv[0]] = kv[1]
}
return out, nil
}
// GetStringToString return the map[string]string value of a flag with the given name
func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
val, err := f.getFlagType(name, "stringToString", stringToStringConv)
if err != nil {
return map[string]string{}, err
}
return val.(map[string]string), nil
}
// StringToStringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
f.VarP(newStringToStringValue(value, p), name, "", usage)
}
// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
}
// StringToStringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]string variable in which to store the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
}
// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
}
// StringToString defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
p := map[string]string{}
f.StringToStringVarP(&p, name, "", value, usage)
return &p
}
// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
p := map[string]string{}
f.StringToStringVarP(&p, name, shorthand, value, usage)
return &p
}
// StringToString defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToString(name string, value map[string]string, usage string) *map[string]string {
return CommandLine.StringToStringP(name, "", value, usage)
}
// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
return CommandLine.StringToStringP(name, shorthand, value, usage)
}
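The string-to-string variant works the same way, sketched here under the same assumptions; values containing two or more '=' signs are parsed through encoding/csv:

    labels := pflag.StringToString("labels", map[string]string{}, "key=value pairs")
    pflag.Parse()
    // --labels env=prod,team=infra  =>  map[env:prod team:infra]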

@ -113,6 +113,23 @@ func (fnfe ConfigFileNotFoundError) Error() string {
return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
} }
// A DecoderConfigOption can be passed to viper.Unmarshal to configure
// mapstructure.DecoderConfig options
type DecoderConfigOption func(*mapstructure.DecoderConfig)
// DecodeHook returns a DecoderConfigOption which overrides the default
// DecoderConfig.DecodeHook value, the default is:
//
// mapstructure.ComposeDecodeHookFunc(
// mapstructure.StringToTimeDurationHookFunc(),
// mapstructure.StringToSliceHookFunc(","),
// )
func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
return func(c *mapstructure.DecoderConfig) {
c.DecodeHook = hook
}
}
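A hedged sketch of passing a DecoderConfigOption through Unmarshal; the struct, tag, and hook choice are illustrative and assume the usual viper and mapstructure imports:

    type serverConfig struct {
        Timeout time.Duration `mapstructure:"timeout"`
    }
    var c serverConfig
    // Replace the default hook chain with a single duration hook.
    err := viper.Unmarshal(&c, viper.DecodeHook(
        mapstructure.StringToTimeDurationHookFunc(),
    ))
    if err != nil {
        // handle the decode error
    }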
// Viper is a prioritized configuration registry. It // Viper is a prioritized configuration registry. It
// maintains a set of configuration sources, fetches // maintains a set of configuration sources, fetches
// values to populate those, and provides them according // values to populate those, and provides them according
@ -289,9 +306,11 @@ func (v *Viper) WatchConfig() {
if err != nil { if err != nil {
log.Println("error:", err) log.Println("error:", err)
} }
if v.onConfigChange != nil {
v.onConfigChange(event) v.onConfigChange(event)
} }
} }
}
case err := <-watcher.Errors: case err := <-watcher.Errors:
log.Println("error:", err) log.Println("error:", err)
} }
@ -631,8 +650,10 @@ func (v *Viper) Get(key string) interface{} {
return cast.ToBool(val) return cast.ToBool(val)
case string: case string:
return cast.ToString(val) return cast.ToString(val)
case int64, int32, int16, int8, int: case int32, int16, int8, int:
return cast.ToInt(val) return cast.ToInt(val)
case int64:
return cast.ToInt64(val)
case float64, float32: case float64, float32:
return cast.ToFloat64(val) return cast.ToFloat64(val)
case time.Time: case time.Time:
@ -682,6 +703,12 @@ func (v *Viper) GetInt(key string) int {
return cast.ToInt(v.Get(key)) return cast.ToInt(v.Get(key))
} }
// GetInt32 returns the value associated with the key as an integer.
func GetInt32(key string) int32 { return v.GetInt32(key) }
func (v *Viper) GetInt32(key string) int32 {
return cast.ToInt32(v.Get(key))
}
// GetInt64 returns the value associated with the key as an integer. // GetInt64 returns the value associated with the key as an integer.
func GetInt64(key string) int64 { return v.GetInt64(key) } func GetInt64(key string) int64 { return v.GetInt64(key) }
func (v *Viper) GetInt64(key string) int64 { func (v *Viper) GetInt64(key string) int64 {
@ -739,9 +766,11 @@ func (v *Viper) GetSizeInBytes(key string) uint {
} }
// UnmarshalKey takes a single key and unmarshals it into a Struct. // UnmarshalKey takes a single key and unmarshals it into a Struct.
func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) } func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal, opts...)
err := decode(v.Get(key), defaultDecoderConfig(rawVal)) }
func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
if err != nil { if err != nil {
return err return err
@ -754,9 +783,11 @@ func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error {
// Unmarshal unmarshals the config into a Struct. Make sure that the tags // Unmarshal unmarshals the config into a Struct. Make sure that the tags
// on the fields of the structure are properly set. // on the fields of the structure are properly set.
func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) } func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
func (v *Viper) Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal, opts...)
err := decode(v.AllSettings(), defaultDecoderConfig(rawVal)) }
func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
if err != nil { if err != nil {
return err return err
@ -769,8 +800,8 @@ func (v *Viper) Unmarshal(rawVal interface{}) error {
// defaultDecoderConfig returns default mapstructure.DecoderConfig with support // defaultDecoderConfig returns default mapstructure.DecoderConfig with support
// of time.Duration values & string slices // of time.Duration values & string slices
func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig { func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
return &mapstructure.DecoderConfig{ c := &mapstructure.DecoderConfig{
Metadata: nil, Metadata: nil,
Result: output, Result: output,
WeaklyTypedInput: true, WeaklyTypedInput: true,
@ -779,6 +810,10 @@ func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig {
mapstructure.StringToSliceHookFunc(","), mapstructure.StringToSliceHookFunc(","),
), ),
} }
for _, opt := range opts {
opt(c)
}
return c
} }
// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality // A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
@ -1108,7 +1143,7 @@ func (v *Viper) SetDefault(key string, value interface{}) {
deepestMap[lastKey] = value deepestMap[lastKey] = value
} }
// Set sets the value for the key in the override regiser. // Set sets the value for the key in the override register.
// Set is case-insensitive for a key. // Set is case-insensitive for a key.
// Will be used instead of values obtained via // Will be used instead of values obtained via
// flags, config file, ENV, default, or key/value store. // flags, config file, ENV, default, or key/value store.

vendor/golang.org/x/net/context/context.go (generated vendored, new file, +56 lines)
@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}

vendor/golang.org/x/net/context/go17.go (generated vendored, new file, +72 lines)
@ -0,0 +1,72 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}
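On Go 1.7+ these wrappers forward directly to the standard library; a minimal usage sketch with an arbitrary timeout:

    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()
    select {
    case <-ctx.Done():
        fmt.Println("done:", ctx.Err()) // context.DeadlineExceeded once the timeout elapses
    }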

vendor/golang.org/x/net/context/go19.go (generated vendored, new file, +20 lines)
@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc

300
vendor/golang.org/x/net/context/pre_go17.go generated vendored Normal file

@ -0,0 +1,300 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

109
vendor/golang.org/x/net/context/pre_go19.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

50
vendor/golang.org/x/net/http/httpguts/guts.go generated vendored Normal file

@ -0,0 +1,50 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package httpguts provides functions implementing various details
// of the HTTP specification.
//
// This package is shared by the standard library (which vendors it)
// and x/net/http2. It comes with no API stability promise.
package httpguts
import (
"net/textproto"
"strings"
)
// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See RFC 7230, Section 4.1.2
func ValidTrailerHeader(name string) bool {
name = textproto.CanonicalMIMEHeaderKey(name)
if strings.HasPrefix(name, "If-") || badTrailer[name] {
return false
}
return true
}
var badTrailer = map[string]bool{
"Authorization": true,
"Cache-Control": true,
"Connection": true,
"Content-Encoding": true,
"Content-Length": true,
"Content-Range": true,
"Content-Type": true,
"Expect": true,
"Host": true,
"Keep-Alive": true,
"Max-Forwards": true,
"Pragma": true,
"Proxy-Authenticate": true,
"Proxy-Authorization": true,
"Proxy-Connection": true,
"Range": true,
"Realm": true,
"Te": true,
"Trailer": true,
"Transfer-Encoding": true,
"Www-Authenticate": true,
}
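A short usage sketch of ValidTrailerHeader; the caller, the trailer names, and the output comments are illustrative assumptions, not part of the vendored package:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Hypothetical filtering of user-supplied trailer names.
	for _, name := range []string{"Grpc-Status", "Content-Length", "If-Match"} {
		if httpguts.ValidTrailerHeader(name) {
			fmt.Println("allowed trailer: ", name) // only Grpc-Status passes
		} else {
			fmt.Println("rejected trailer:", name) // Content-Length (bad trailer), If-Match (If- prefix)
		}
	}
}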


@ -2,12 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package httplex contains rules around lexical matters of various
-// HTTP-related specifications.
-//
-// This package is shared by the standard library (which vendors it)
-// and x/net/http2. It comes with no API stability promise.
-package httplex
+package httpguts
import (
"net"


@ -52,9 +52,31 @@ const (
noDialOnMiss = false
)
+// shouldTraceGetConn reports whether getClientConn should call any
+// ClientTrace.GetConn hook associated with the http.Request.
+//
+// This complexity is needed to avoid double calls of the GetConn hook
+// during the back-and-forth between net/http and x/net/http2 (when the
+// net/http.Transport is upgraded to also speak http2), as well as support
+// the case where x/net/http2 is being used directly.
+func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
+// If our Transport wasn't made via ConfigureTransport, always
+// trace the GetConn hook if provided, because that means the
+// http2 package is being used directly and it's the one
+// dialing, as opposed to net/http.
+if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
+return true
+}
+// Otherwise, only use the GetConn hook if this connection has
+// been used previously for other requests. For fresh
+// connections, the net/http package does the dialing.
+return !st.freshConn
+}
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
+traceGetConn(req, addr)
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil {
@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
}
p.mu.Lock()
for _, cc := range p.conns[addr] {
-if cc.CanTakeNewRequest() {
+if st := cc.idleState(); st.canTakeNewRequest {
+if p.shouldTraceGetConn(st) {
+traceGetConn(req, addr)
+}
p.mu.Unlock()
return cc, nil
}
@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
p.mu.Unlock()
return nil, ErrNoCachedConn
}
+traceGetConn(req, addr)
call := p.getStartDialLocked(addr)
p.mu.Unlock()
<-call.done


@ -57,7 +57,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
-func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("%v", e)
@ -69,10 +69,12 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error)
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already has a cached connection to the host.
-type noDialH2RoundTripper struct{ t *Transport }
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type noDialH2RoundTripper struct{ *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
-res, err := rt.t.RoundTrip(req)
+res, err := rt.Transport.RoundTrip(req)
if isNoCachedConnError(err) {
return nil, http.ErrSkipAltProtocol
}


@ -41,10 +41,10 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
-remain := (1<<31 - 1) - f.n
+sum := f.n + n
-if n > remain {
+if (sum > n) == (f.n > 0) {
-return false
+f.n = sum
-}
-f.n += n
return true
+}
+return false
}
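The rewritten add replaces the explicit remaining-space computation with a wrap-around test. A standalone sketch of the same predicate (the helper name is hypothetical, not part of the vendored package):

// addWouldOverflow reports whether w+n overflows int32. It mirrors the guard
// in flow.add above: the update is applied only when (sum > n) == (w > 0),
// which is false exactly when the signed 32-bit addition wrapped around.
// For example, addWouldOverflow(math.MaxInt32, 1) is true, addWouldOverflow(100, 50) is false.
func addWouldOverflow(w, n int32) bool {
	sum := w + n
	return (sum > n) != (w > 0)
}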


@ -14,8 +14,8 @@ import (
"strings" "strings"
"sync" "sync"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
"golang.org/x/net/lex/httplex"
) )
const frameHeaderLen = 9 const frameHeaderLen = 9
@ -733,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool {
return f.FrameHeader.Flags.Has(FlagSettingsAck) return f.FrameHeader.Flags.Has(FlagSettingsAck)
} }
func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) {
f.checkValid() f.checkValid()
buf := f.p for i := 0; i < f.NumSettings(); i++ {
for len(buf) > 0 { if s := f.Setting(i); s.ID == id {
settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) return s.Val, true
if settingID == s {
return binary.BigEndian.Uint32(buf[2:6]), true
} }
buf = buf[6:]
} }
return 0, false return 0, false
} }
// Setting returns the setting from the frame at the given 0-based index.
// The index must be >= 0 and less than f.NumSettings().
func (f *SettingsFrame) Setting(i int) Setting {
buf := f.p
return Setting{
ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
}
}
func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 }
// HasDuplicates reports whether f contains any duplicate setting IDs.
func (f *SettingsFrame) HasDuplicates() bool {
num := f.NumSettings()
if num == 0 {
return false
}
// If it's small enough (the common case), just do the n^2
// thing and avoid a map allocation.
if num < 10 {
for i := 0; i < num; i++ {
idi := f.Setting(i).ID
for j := i + 1; j < num; j++ {
idj := f.Setting(j).ID
if idi == idj {
return true
}
}
}
return false
}
seen := map[SettingID]bool{}
for i := 0; i < num; i++ {
id := f.Setting(i).ID
if seen[id] {
return true
}
seen[id] = true
}
return false
}
// ForeachSetting runs fn for each setting. // ForeachSetting runs fn for each setting.
// It stops and returns the first error. // It stops and returns the first error.
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
f.checkValid() f.checkValid()
buf := f.p for i := 0; i < f.NumSettings(); i++ {
for len(buf) > 0 { if err := fn(f.Setting(i)); err != nil {
if err := fn(Setting{
SettingID(binary.BigEndian.Uint16(buf[:2])),
binary.BigEndian.Uint32(buf[2:6]),
}); err != nil {
return err return err
} }
buf = buf[6:]
} }
return nil return nil
} }
@ -1462,7 +1497,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
if VerboseLogs && fr.logReads { if VerboseLogs && fr.logReads {
fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
} }
if !httplex.ValidHeaderFieldValue(hf.Value) { if !httpguts.ValidHeaderFieldValue(hf.Value) {
invalid = headerFieldValueError(hf.Value) invalid = headerFieldValueError(hf.Value)
} }
isPseudo := strings.HasPrefix(hf.Name, ":") isPseudo := strings.HasPrefix(hf.Name, ":")
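A sketch of how the SettingsFrame accessors added above in this file might be consumed by a caller. The package name, helper, and logging are assumptions; Framer, Setting, and SettingMaxConcurrentStreams are part of the existing x/net/http2 API:

package http2util // hypothetical helper package

import (
	"errors"
	"log"

	"golang.org/x/net/http2"
)

// dumpSettings reads one frame from fr and, if it is a non-ACK SETTINGS
// frame, rejects duplicate IDs and logs each entry via the new accessors.
func dumpSettings(fr *http2.Framer) error {
	f, err := fr.ReadFrame()
	if err != nil {
		return err
	}
	sf, ok := f.(*http2.SettingsFrame)
	if !ok || sf.IsAck() {
		return nil
	}
	if sf.HasDuplicates() {
		return errors.New("peer sent duplicate setting IDs")
	}
	for i := 0; i < sf.NumSettings(); i++ {
		s := sf.Setting(i)
		log.Printf("SETTINGS[%d]: %v = %d", i, s.ID, s.Val)
	}
	if v, ok := sf.Value(http2.SettingMaxConcurrentStreams); ok {
		log.Printf("peer advertises MAX_CONCURRENT_STREAMS = %d", v)
	}
	return nil
}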

26
vendor/golang.org/x/net/http2/go111.go generated vendored Normal file

@ -0,0 +1,26 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
package http2
import "net/textproto"
func traceHasWroteHeaderField(trace *clientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *clientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}
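These helpers surface fields of net/http/httptrace.ClientTrace that exist only on Go 1.11+. A minimal client-side sketch of wiring such a trace to a request (the URL and log output are illustrative):

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	// Attach a ClientTrace so hooks like the ones wired up above fire
	// while the request is written and the response is read.
	trace := &httptrace.ClientTrace{
		GetConn: func(hostPort string) { log.Println("getting conn for", hostPort) },
		WroteHeaderField: func(key string, value []string) {
			log.Printf("wrote header %s: %v", key, value)
		},
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			log.Printf("informational response %d: %v", code, header)
			return nil
		},
	}
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if _, err := http.DefaultClient.Do(req); err != nil {
		log.Fatal(err)
	}
}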


@ -18,6 +18,8 @@ type contextContext interface {
context.Context
}
+var errCanceled = context.Canceled
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
ctx, cancel = context.WithCancel(context.Background())
ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration {
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+func traceGetConn(req *http.Request, hostPort string) {
+trace := httptrace.ContextClientTrace(req.Context())
+if trace == nil || trace.GetConn == nil {
+return
+}
+trace.GetConn(hostPort)
+}
func traceGotConn(req *http.Request, cc *ClientConn) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GotConn == nil {
@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace {
func (cc *ClientConn) Ping(ctx context.Context) error {
return cc.ping(ctx)
}
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *ClientConn) Shutdown(ctx context.Context) error {
+return cc.shutdown(ctx)
+}
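A possible caller-side pattern for the newly exposed Shutdown/Close pair. The helper names and the five-second bound are assumptions; NewClientConn is the existing Transport entry point:

package http2util // hypothetical helper package

import (
	"context"
	"net"
	"time"

	"golang.org/x/net/http2"
)

// newConn wraps an already-established connection in an HTTP/2 ClientConn.
func newConn(t *http2.Transport, c net.Conn) (*http2.ClientConn, error) {
	return t.NewClientConn(c)
}

// closeGracefully sends GOAWAY and waits up to five seconds for in-flight
// streams to finish; if that window passes it falls back to a hard Close.
func closeGracefully(cc *http2.ClientConn) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		return cc.Close()
	}
	return nil
}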


@ -7,15 +7,21 @@ package http2
import (
"net/http"
"strings"
+"sync"
)
var (
-commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
-commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+commonBuildOnce sync.Once
+commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)
-func init() {
-for _, v := range []string{
+func buildCommonHeaderMapsOnce() {
+commonBuildOnce.Do(buildCommonHeaderMaps)
+}
+func buildCommonHeaderMaps() {
+common := []string{
"accept",
"accept-charset",
"accept-encoding",
@ -63,7 +69,10 @@ func init() {
"vary",
"via",
"www-authenticate",
-} {
+}
+commonLowerHeader = make(map[string]string, len(common))
+commonCanonHeader = make(map[string]string, len(common))
+for _, v := range common {
chk := http.CanonicalHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
@ -71,6 +80,7 @@ func init() {
}
func lowerHeader(v string) string {
+buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s
}


@ -206,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte {
}
// appendHpackString appends s, as encoded in "String Literal"
-// representation, to dst and returns the the extended buffer.
+// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces strictly
// shorter byte string.


@ -389,6 +389,12 @@ func (d *Decoder) callEmit(hf HeaderField) error {
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
+// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
+// beginning of the first header block following the change to the dynamic table size.
+if d.dynTab.size > 0 {
+return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
+}
buf := d.buf
size, buf, err := readVarInt(5, buf)
if err != nil {


@ -47,6 +47,7 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+rootHuffmanNode := getRootHuffmanNode()
n := rootHuffmanNode
// cur is the bit buffer that has not been fed into n.
// cbits is the number of low order bits in cur that are valid.
@ -106,7 +107,7 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
type node struct {
// children is non-nil for internal nodes
-children []*node
+children *[256]*node
// The following are only valid if children is nil:
codeLen uint8 // number of bits that led to the output of sym
@ -114,22 +115,31 @@ type node struct {
}
func newInternalNode() *node {
-return &node{children: make([]*node, 256)}
+return &node{children: new([256]*node)}
}
-var rootHuffmanNode = newInternalNode()
+var (
+buildRootOnce sync.Once
+lazyRootHuffmanNode *node
+)
-func init() {
+func getRootHuffmanNode() *node {
+buildRootOnce.Do(buildRootHuffmanNode)
+return lazyRootHuffmanNode
+}
+func buildRootHuffmanNode() {
if len(huffmanCodes) != 256 {
panic("unexpected size")
}
+lazyRootHuffmanNode = newInternalNode()
for i, code := range huffmanCodes {
addDecoderNode(byte(i), code, huffmanCodeLen[i])
}
}
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
-cur := rootHuffmanNode
+cur := lazyRootHuffmanNode
for codeLen > 8 {
codeLen -= 8
i := uint8(code >> codeLen)
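The change above swaps an eager init() for a sync.Once-guarded lazy build. The same pattern in isolation, with illustrative names rather than the vendored ones:

package lazyinit

import "sync"

var (
	tableOnce sync.Once
	table     map[byte]string // built on first use instead of at program start
)

// getTable returns the shared table, building it at most once even under
// concurrent callers, so importers that never use it pay no startup cost.
func getTable() map[byte]string {
	tableOnce.Do(buildTable)
	return table
}

func buildTable() {
	table = make(map[byte]string, 256)
	for i := 0; i < 256; i++ {
		table[byte(i)] = string(rune(i))
	}
}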


@ -29,7 +29,7 @@ import (
"strings" "strings"
"sync" "sync"
"golang.org/x/net/lex/httplex" "golang.org/x/net/http/httpguts"
) )
var ( var (
@ -179,7 +179,7 @@ var (
) )
// validWireHeaderFieldName reports whether v is a valid header field // validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httplex.ValidHeaderName for the base rules. // name (key). See httpguts.ValidHeaderName for the base rules.
// //
// Further, http2 says: // Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII // "Just as in HTTP/1.x, header field names are strings of ASCII
@ -191,7 +191,7 @@ func validWireHeaderFieldName(v string) bool {
return false return false
} }
for _, r := range v { for _, r := range v {
if !httplex.IsTokenRune(r) { if !httpguts.IsTokenRune(r) {
return false return false
} }
if 'A' <= r && r <= 'Z' { if 'A' <= r && r <= 'Z' {
@ -201,19 +201,12 @@ func validWireHeaderFieldName(v string) bool {
return true return true
} }
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
func init() {
for i := 100; i <= 999; i++ {
if v := http.StatusText(i); v != "" {
httpCodeStringCommon[i] = strconv.Itoa(i)
}
}
}
func httpCodeString(code int) string { func httpCodeString(code int) string {
if s, ok := httpCodeStringCommon[code]; ok { switch code {
return s case 200:
return "200"
case 404:
return "404"
} }
return strconv.Itoa(code) return strconv.Itoa(code)
} }
@ -312,7 +305,7 @@ func mustUint31(v int32) uint32 {
} }
// bodyAllowedForStatus reports whether a given response status code // bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 2616, section 4.4. // permits a body. See RFC 7230, section 3.3.
func bodyAllowedForStatus(status int) bool { func bodyAllowedForStatus(status int) bool {
switch { switch {
case status >= 100 && status <= 199: case status >= 100 && status <= 199:

17
vendor/golang.org/x/net/http2/not_go111.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.11
package http2
import "net/textproto"
func traceHasWroteHeaderField(trace *clientTrace) bool { return false }
func traceWroteHeaderField(trace *clientTrace, k, v string) {}
func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error {
return nil
}


@ -8,6 +8,7 @@ package http2
import (
"crypto/tls"
+"errors"
"net"
"net/http"
"time"
@ -18,6 +19,8 @@ type contextContext interface {
Err() error
}
+var errCanceled = errors.New("canceled")
type fakeContext struct{}
func (fakeContext) Done() <-chan struct{} { return nil }
@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) {
type clientTrace struct{}
func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGetConn(*http.Request, string) {}
func traceGotConn(*http.Request, *ClientConn) {}
func traceFirstResponseByte(*clientTrace) {}
func traceWroteHeaders(*clientTrace) {}
@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error {
return cc.ping(ctx)
}
+func (cc *ClientConn) Shutdown(ctx contextContext) error {
+return cc.shutdown(ctx)
+}
func (t *Transport) idleConnTimeout() time.Duration { return 0 }


@ -46,6 +46,7 @@ import (
"sync" "sync"
"time" "time"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
) )
@ -406,7 +407,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// addresses during development. // addresses during development.
// //
// TODO: optionally enforce? Or enforce at the time we receive // TODO: optionally enforce? Or enforce at the time we receive
// a new request, and verify the the ServerName matches the :authority? // a new request, and verify the ServerName matches the :authority?
// But that precludes proxy situations, perhaps. // But that precludes proxy situations, perhaps.
// //
// So for now, do nothing here again. // So for now, do nothing here again.
@ -662,6 +663,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
func (sc *serverConn) canonicalHeader(v string) string { func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check() sc.serveG.check()
buildCommonHeaderMapsOnce()
cv, ok := commonCanonHeader[v] cv, ok := commonCanonHeader[v]
if ok { if ok {
return cv return cv
@ -1486,6 +1488,12 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
} }
return nil return nil
} }
if f.NumSettings() > 100 || f.HasDuplicates() {
// This isn't actually in the spec, but hang up on
// suspiciously large settings frames or those with
// duplicate entries.
return ConnectionError(ErrCodeProtocol)
}
if err := f.ForeachSetting(sc.processSetting); err != nil { if err := f.ForeachSetting(sc.processSetting); err != nil {
return err return err
} }
@ -1574,6 +1582,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// type PROTOCOL_ERROR." // type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol) return ConnectionError(ErrCodeProtocol)
} }
// RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in
// "open" or "half-closed (local)" state, the recipient MUST respond with a
// stream error (Section 5.4.2) of type STREAM_CLOSED.
if state == stateClosed {
return streamError(id, ErrCodeStreamClosed)
}
if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
// This includes sending a RST_STREAM if the stream is // This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that // in stateHalfClosedLocal (which currently means that
@ -1607,7 +1621,10 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Sender sending more than they'd declared? // Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
return streamError(id, ErrCodeStreamClosed) // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
// DATA frame payload lengths that form the body.
return streamError(id, ErrCodeProtocol)
} }
if f.Length > 0 { if f.Length > 0 {
// Check whether the client has flow control quota. // Check whether the client has flow control quota.
@ -1717,6 +1734,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// processing this frame. // processing this frame.
return nil return nil
} }
// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
// this state, it MUST respond with a stream error (Section 5.4.2) of
// type STREAM_CLOSED.
if st.state == stateHalfClosedRemote {
return streamError(id, ErrCodeStreamClosed)
}
return st.processTrailerHeaders(f) return st.processTrailerHeaders(f)
} }
@ -1817,7 +1841,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
if st.trailer != nil { if st.trailer != nil {
for _, hf := range f.RegularFields() { for _, hf := range f.RegularFields() {
key := sc.canonicalHeader(hf.Name) key := sc.canonicalHeader(hf.Name)
if !ValidTrailerHeader(key) { if !httpguts.ValidTrailerHeader(key) {
// TODO: send more details to the peer somehow. But http2 has // TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with // no way to send debug data at a stream level. Discuss with
// HTTP folk. // HTTP folk.
@ -2284,8 +2308,8 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) !=
// written in the trailers at the end of the response. // written in the trailers at the end of the response.
func (rws *responseWriterState) declareTrailer(k string) { func (rws *responseWriterState) declareTrailer(k string) {
k = http.CanonicalHeaderKey(k) k = http.CanonicalHeaderKey(k)
if !ValidTrailerHeader(k) { if !httpguts.ValidTrailerHeader(k) {
// Forbidden by RFC 2616 14.40. // Forbidden by RFC 7230, section 4.1.2.
rws.conn.logf("ignoring invalid trailer %q", k) rws.conn.logf("ignoring invalid trailer %q", k)
return return
} }
@ -2335,6 +2359,19 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
foreachHeaderElement(v, rws.declareTrailer) foreachHeaderElement(v, rws.declareTrailer)
} }
// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
// down the TCP connection when idle, like we do for HTTP/1.
// TODO: remove more Connection-specific header fields here, in addition
// to "Connection".
if _, ok := rws.snapHeader["Connection"]; ok {
v := rws.snapHeader.Get("Connection")
delete(rws.snapHeader, "Connection")
if v == "close" {
rws.conn.startGracefulShutdown()
}
}
endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id, streamID: rws.stream.id,
@ -2406,7 +2443,7 @@ const TrailerPrefix = "Trailer:"
// after the header has already been flushed. Because the Go // after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the // ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter // Header), and because we didn't want to expand the ResponseWriter
// interface, and because nobody used trailers, and because RFC 2616 // interface, and because nobody used trailers, and because RFC 7230
// says you SHOULD (but not must) predeclare any trailers in the // says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must // header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header() // be predeclared, and then we reuse the same ResponseWriter.Header()
@ -2790,7 +2827,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
} }
// foreachHeaderElement splits v according to the "#rule" construction // foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element. // in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) { func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v) v = textproto.TrimString(v)
if v == "" { if v == "" {
@ -2838,41 +2875,6 @@ func new400Handler(err error) http.HandlerFunc {
} }
} }
// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
func ValidTrailerHeader(name string) bool {
name = http.CanonicalHeaderKey(name)
if strings.HasPrefix(name, "If-") || badTrailer[name] {
return false
}
return true
}
var badTrailer = map[string]bool{
"Authorization": true,
"Cache-Control": true,
"Connection": true,
"Content-Encoding": true,
"Content-Length": true,
"Content-Range": true,
"Content-Type": true,
"Expect": true,
"Host": true,
"Keep-Alive": true,
"Max-Forwards": true,
"Pragma": true,
"Proxy-Authenticate": true,
"Proxy-Authorization": true,
"Proxy-Connection": true,
"Range": true,
"Realm": true,
"Te": true,
"Trailer": true,
"Transfer-Encoding": true,
"Www-Authenticate": true,
}
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why // disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way. // the code is written this way.


@ -21,15 +21,16 @@ import (
mathrand "math/rand"
"net"
"net/http"
+"net/textproto"
"sort"
"strconv"
"strings"
"sync"
"time"
+"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
-"golang.org/x/net/lex/httplex"
)
const ( const (
@ -159,6 +160,7 @@ type ClientConn struct {
cond *sync.Cond // hold mu; broadcast on flow/closed changes cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream) flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control inflow flow // peer's conn-level flow control
closing bool
closed bool closed bool
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
@ -214,6 +216,7 @@ type clientStream struct {
firstByte bool // got the first response byte firstByte bool // got the first response byte
pastHeaders bool // got first MetaHeadersFrame (actual headers) pastHeaders bool // got first MetaHeadersFrame (actual headers)
pastTrailers bool // got optional second MetaHeadersFrame (trailers) pastTrailers bool // got optional second MetaHeadersFrame (trailers)
num1xx uint8 // number of 1xx responses seen
trailer http.Header // accumulated trailers trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer resTrailer *http.Header // client's Response.Trailer
@ -237,6 +240,17 @@ func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
} }
} }
var got1xxFuncForTests func(int, textproto.MIMEHeader) error
// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
// if any. It returns nil if not set or if the Go version is too old.
func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
if fn := got1xxFuncForTests; fn != nil {
return fn
}
return traceGot1xxResponseFunc(cs.trace)
}
// awaitRequestCancel waits for the user to cancel a request, its context to // awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the // expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage, // cc.streams map: peer reset, successful completion, TCP connection breakage,
@ -423,20 +437,17 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
if !canRetryError(err) { if !canRetryError(err) {
return nil, err return nil, err
} }
if !afterBodyWrite {
return req, nil
}
// If the Body is nil (or http.NoBody), it's safe to reuse // If the Body is nil (or http.NoBody), it's safe to reuse
// this request and its Body. // this request and its Body.
if req.Body == nil || reqBodyIsNoBody(req.Body) { if req.Body == nil || reqBodyIsNoBody(req.Body) {
return req, nil return req, nil
} }
// Otherwise we depend on the Request having its GetBody
// func defined. // If the request body can be reset back to its original
// state via the optional req.GetBody, do that.
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
if getBody == nil { if getBody != nil {
return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) // TODO: consider a req.Body.Close here? or audit that all caller paths do?
}
body, err := getBody() body, err := getBody()
if err != nil { if err != nil {
return nil, err return nil, err
@ -444,6 +455,18 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
newReq := *req newReq := *req
newReq.Body = body newReq.Body = body
return &newReq, nil return &newReq, nil
}
// The Request.Body can't reset back to the beginning, but we
// don't seem to have started to read from it yet, so reuse
// the request directly. The "afterBodyWrite" means the
// bodyWrite process has started, which becomes true before
// the first Read.
if !afterBodyWrite {
return req, nil
}
return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
} }
func canRetryError(err error) bool { func canRetryError(err error) bool {
@ -567,6 +590,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// henc in response to SETTINGS frames? // henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf) cc.henc = hpack.NewEncoder(&cc.hbuf)
if t.AllowHTTP {
cc.nextStreamID = 3
}
if cs, ok := c.(connectionStater); ok { if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState() state := cs.ConnectionState()
cc.tlsState = &state cc.tlsState = &state
@ -626,12 +653,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool {
return cc.canTakeNewRequestLocked() return cc.canTakeNewRequestLocked()
} }
func (cc *ClientConn) canTakeNewRequestLocked() bool { // clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
canTakeNewRequest bool
freshConn bool // whether it's unused by any previous request
}
func (cc *ClientConn) idleState() clientConnIdleState {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.idleStateLocked()
}
func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
if cc.singleUse && cc.nextStreamID > 1 { if cc.singleUse && cc.nextStreamID > 1 {
return false return
} }
return cc.goAway == nil && !cc.closed && st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing &&
int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
func (cc *ClientConn) canTakeNewRequestLocked() bool {
st := cc.idleStateLocked()
return st.canTakeNewRequest
} }
// onIdleTimeout is called from a time.AfterFunc goroutine. It will // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@ -661,6 +708,88 @@ func (cc *ClientConn) closeIfIdle() {
cc.tconn.Close() cc.tconn.Close()
} }
var shutdownEnterWaitStateHook = func() {}
// Shutdown gracefully close the client connection, waiting for running streams to complete.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) shutdown(ctx contextContext) error {
if err := cc.sendGoAway(); err != nil {
return err
}
// Wait for all in-flight streams to complete or connection to close
done := make(chan error, 1)
cancelled := false // guarded by cc.mu
go func() {
cc.mu.Lock()
defer cc.mu.Unlock()
for {
if len(cc.streams) == 0 || cc.closed {
cc.closed = true
done <- cc.tconn.Close()
break
}
if cancelled {
break
}
cc.cond.Wait()
}
}()
shutdownEnterWaitStateHook()
select {
case err := <-done:
return err
case <-ctx.Done():
cc.mu.Lock()
// Free the goroutine above
cancelled = true
cc.cond.Broadcast()
cc.mu.Unlock()
return ctx.Err()
}
}
func (cc *ClientConn) sendGoAway() error {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if cc.closing {
// GOAWAY sent already
return nil
}
// Send a graceful shutdown frame to server
maxStreamID := cc.nextStreamID
if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
return err
}
if err := cc.bw.Flush(); err != nil {
return err
}
// Prevent new requests
cc.closing = true
return nil
}
// Close closes the client connection immediately.
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
err := errors.New("http2: client connection force closed via ClientConn.Close")
for id, cs := range cc.streams {
select {
case cs.resc <- resAndError{err: err}:
default:
}
cs.bufPipe.CloseWithError(err)
delete(cc.streams, id)
}
cc.closed = true
return cc.tconn.Close()
}
const maxAllocFrameSize = 512 << 10 const maxAllocFrameSize = 512 << 10
// frameBuffer returns a scratch buffer suitable for writing DATA frames. // frameBuffer returns a scratch buffer suitable for writing DATA frames.
@ -743,7 +872,7 @@ func checkConnHeaders(req *http.Request) error {
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
} }
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("http2: invalid Connection request header: %q", vv) return fmt.Errorf("http2: invalid Connection request header: %q", vv)
} }
return nil return nil
@ -951,6 +1080,9 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
for { for {
cc.lastActive = time.Now() cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() { if cc.closed || !cc.canTakeNewRequestLocked() {
if waitingForConn != nil {
close(waitingForConn)
}
return errClientConnUnusable return errClientConnUnusable
} }
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
@ -1174,7 +1306,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" { if host == "" {
host = req.URL.Host host = req.URL.Host
} }
host, err := httplex.PunycodeHostPort(host) host, err := httpguts.PunycodeHostPort(host)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1199,11 +1331,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// potentially pollute our hpack state. (We want to be able to // potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests) // continue to reuse the hpack encoder for future requests)
for k, vv := range req.Header { for k, vv := range req.Header {
if !httplex.ValidHeaderFieldName(k) { if !httpguts.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("invalid HTTP header name %q", k) return nil, fmt.Errorf("invalid HTTP header name %q", k)
} }
for _, v := range vv { for _, v := range vv {
if !httplex.ValidHeaderFieldValue(v) { if !httpguts.ValidHeaderFieldValue(v) {
return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
} }
} }
@ -1284,9 +1416,16 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
return nil, errRequestHeaderListSize return nil, errRequestHeaderListSize
} }
trace := requestTrace(req)
traceHeaders := traceHasWroteHeaderField(trace)
// Header list size is ok. Write the headers. // Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) { enumerateHeaders(func(name, value string) {
cc.writeHeader(strings.ToLower(name), value) name = strings.ToLower(name)
cc.writeHeader(name, value)
if traceHeaders {
traceWroteHeaderField(trace, name, value)
}
}) })
return cc.hbuf.Bytes(), nil return cc.hbuf.Bytes(), nil
@ -1608,8 +1747,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
// is the detail. // is the detail.
// //
// As a special case, handleResponse may return (nil, nil) to skip the // As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 100 expect continue). This special // frame (currently only used for 1xx responses).
// case is going away after Issue 13851 is fixed.
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
if f.Truncated { if f.Truncated {
return nil, errResponseHeaderListSize return nil, errResponseHeaderListSize
@ -1624,15 +1762,6 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
} }
if statusCode == 100 {
traceGot100Continue(cs.trace)
if cs.on100 != nil {
cs.on100() // forces any write delay timer to fire
}
cs.pastHeaders = false // do it all again
return nil, nil
}
header := make(http.Header) header := make(http.Header)
res := &http.Response{ res := &http.Response{
Proto: "HTTP/2.0", Proto: "HTTP/2.0",
@ -1657,6 +1786,27 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
} }
} }
if statusCode >= 100 && statusCode <= 199 {
cs.num1xx++
const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
if cs.num1xx > max1xxResponses {
return nil, errors.New("http2: too many 1xx informational responses")
}
if fn := cs.get1xxTraceFunc(); fn != nil {
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
return nil, err
}
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
if cs.on100 != nil {
cs.on100() // forces any write delay timer to fire
}
}
cs.pastHeaders = false // do it all again
return nil, nil
}
streamEnded := f.StreamEnded() streamEnded := f.StreamEnded()
isHead := cs.req.Method == "HEAD" isHead := cs.req.Method == "HEAD"
if !streamEnded || isHead { if !streamEnded || isHead {
@ -2244,7 +2394,7 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body
} }
s.delay = t.expectContinueTimeout() s.delay = t.expectContinueTimeout()
if s.delay == 0 || if s.delay == 0 ||
!httplex.HeaderValuesContainsToken( !httpguts.HeaderValuesContainsToken(
cs.req.Header["Expect"], cs.req.Header["Expect"],
"100-continue") { "100-continue") {
return return
@ -2299,5 +2449,5 @@ func (s bodyWriterState) scheduleBodyWrite() {
// isConnectionCloseRequest reports whether req should use its own // isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection. // connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool { func isConnectionCloseRequest(req *http.Request) bool {
return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
} }


@ -11,8 +11,8 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
"golang.org/x/net/lex/httplex"
) )
// writeFramer is implemented by any type that is used to write frames. // writeFramer is implemented by any type that is used to write frames.
@ -199,7 +199,7 @@ func (w *writeResHeaders) staysWithinBuffer(max int) bool {
// TODO: this is a common one. It'd be nice to return true // TODO: this is a common one. It'd be nice to return true
// here and get into the fast path if we could be clever and // here and get into the fast path if we could be clever and
// calculate the size fast enough, or at least a conservative // calculate the size fast enough, or at least a conservative
// uppper bound that usually fires. (Maybe if w.h and // upper bound that usually fires. (Maybe if w.h and
// w.trailers are nil, so we don't need to enumerate it.) // w.trailers are nil, so we don't need to enumerate it.)
// Otherwise I'm afraid that just calculating the length to // Otherwise I'm afraid that just calculating the length to
// answer this question would be slower than the ~2µs benefit. // answer this question would be slower than the ~2µs benefit.
@ -350,7 +350,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
} }
isTE := k == "transfer-encoding" isTE := k == "transfer-encoding"
for _, v := range vv { for _, v := range vv {
if !httplex.ValidHeaderFieldValue(v) { if !httpguts.ValidHeaderFieldValue(v) {
// TODO: return an error? golang.org/issue/14048 // TODO: return an error? golang.org/issue/14048
// For now just omit it. // For now just omit it.
continue continue
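Both http2 files above migrate from golang.org/x/net/lex/httplex to golang.org/x/net/http/httpguts. A short standalone sketch of the two helpers involved (assumes the x/net module is available, as it is in this vendor tree):

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Token matching is case-insensitive and splits comma-separated values,
	// e.g. a "Connection: keep-alive, close" header.
	fmt.Println(httpguts.HeaderValuesContainsToken([]string{"keep-alive, close"}, "close")) // true

	// Values containing control characters are invalid; encodeHeaders above
	// simply omits such values.
	fmt.Println(httpguts.ValidHeaderFieldValue("ok value"))     // true
	fmt.Println(httpguts.ValidHeaderFieldValue("bad\x00value")) // false
}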

vendor/golang.org/x/sys/unix/aliases.go (generated, vendored; new file, 14 lines added)

@ -0,0 +1,14 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// +build go1.9
package unix
import "syscall"
type Signal = syscall.Signal
type Errno = syscall.Errno
type SysProcAttr = syscall.SysProcAttr
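These Go 1.9 type aliases make the unix and syscall names denote the same types. A hypothetical illustration (takesSignal is an invented helper; builds only on the platforms listed in the build tag):

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

// takesSignal is a placeholder helper declared against the syscall type.
func takesSignal(s syscall.Signal) { fmt.Println("signal:", s) }

func main() {
	// unix.SIGTERM is declared in terms of unix.Signal; with the alias above
	// that is the same type as syscall.Signal, so it can be passed directly.
	takesSignal(unix.SIGTERM)
}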


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix package unix

vendor/golang.org/x/sys/unix/dev_aix_ppc.go (generated, vendored; new file, 27 lines added)

@ -0,0 +1,27 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix
// +build ppc
// Functions to access/create device major and minor numbers matching the
// encoding used by AIX.
package unix
// Major returns the major component of a Linux device number.
func Major(dev uint64) uint32 {
return uint32((dev >> 16) & 0xffff)
}
// Minor returns the minor component of a Linux device number.
func Minor(dev uint64) uint32 {
return uint32(dev & 0xffff)
}
// Mkdev returns a Linux device number generated from the given major and minor
// components.
func Mkdev(major, minor uint32) uint64 {
return uint64(((major) << 16) | (minor))
}

vendor/golang.org/x/sys/unix/dev_aix_ppc64.go (generated, vendored; new file, 29 lines added)

@ -0,0 +1,29 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix
// +build ppc64
// Functions to access/create device major and minor numbers matching the
// encoding used by AIX.
package unix
// Major returns the major component of a Linux device number.
func Major(dev uint64) uint32 {
return uint32((dev & 0x3fffffff00000000) >> 32)
}
// Minor returns the minor component of a Linux device number.
func Minor(dev uint64) uint32 {
return uint32((dev & 0x00000000ffffffff) >> 0)
}
// Mkdev returns a Linux device number generated from the given major and minor
// components.
func Mkdev(major, minor uint32) uint64 {
var DEVNO64 uint64
DEVNO64 = 0x8000000000000000
return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
}
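A standalone sketch (not part of the vendored package) mirroring the two AIX encodings added above, to make the bit layout concrete: 16/16-bit major/minor on ppc, and 32/32-bit with the DEVNO64 flag on ppc64:

package main

import "fmt"

func main() {
	// ppc: major in bits 16-31, minor in bits 0-15.
	dev32 := (uint64(5) << 16) | uint64(7)
	fmt.Println((dev32>>16)&0xffff, dev32&0xffff) // 5 7

	// ppc64: major in the upper 32 bits, minor in the lower 32 bits,
	// with bit 63 (DEVNO64) marking the 64-bit layout.
	const devno64 = uint64(0x8000000000000000)
	dev64 := (uint64(5) << 32) | uint64(7) | devno64
	fmt.Println((dev64&0x3fffffff00000000)>>32, dev64&0x00000000ffffffff) // 5 7
}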


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris // +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package unix package unix


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// Unix environment variables. // Unix environment variables.


@ -14,7 +14,11 @@ var fcntl64Syscall uintptr = SYS_FCNTL
// FcntlInt performs a fcntl syscall on fd with the provided command and argument. // FcntlInt performs a fcntl syscall on fd with the provided command and argument.
func FcntlInt(fd uintptr, cmd, arg int) (int, error) { func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg))
var err error
if errno != 0 {
err = errno
}
return int(valptr), err return int(valptr), err
} }
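With the fix above, a zero errno is returned as a nil error rather than a non-nil syscall.Errno(0). A hedged usage sketch of unix.FcntlInt (the path is a placeholder):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts") // placeholder path
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// Read the descriptor's file status flags via fcntl(F_GETFL).
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	if err != nil {
		fmt.Println("fcntl:", err)
		return
	}
	fmt.Printf("F_GETFL flags: %#x\n", flags)
}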


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build gccgo // +build gccgo
// +build !aix
package unix package unix

Some files were not shown because too many files have changed in this diff.