Mirror of https://github.com/Luzifer/vault2env.git (synced 2024-11-09 16:50:06 +00:00)

commit d57b70ff75 (parent 6450bcb8a4)
Switch to dep for vendoring, update vendored libs

Signed-off-by: Knut Ahlers <knut@ahlers.me>

636 changed files with 288578 additions and 35497 deletions
Godeps/Godeps.json (generated) — deleted, 103 lines
@@ -1,103 +0,0 @@
{
  "ImportPath": "github.com/Luzifer/vault2env",
  "GoVersion": "go1.8",
  "GodepVersion": "v79",
  "Deps": [
    {
      "ImportPath": "github.com/Luzifer/go_helpers/env",
      "Comment": "v1.4.0",
      "Rev": "d76f718bb2d7d043fdf9dfdc01af03f20047432b"
    },
    {
      "ImportPath": "github.com/Luzifer/rconfig",
      "Comment": "v1.0.3-2-g2677653",
      "Rev": "26776536e61487fdffbd3ce87f827177a5903f98"
    },
    {
      "ImportPath": "github.com/Sirupsen/logrus",
      "Comment": "v0.10.0-38-g3ec0642",
      "Rev": "3ec0642a7fb6488f65b06f9040adc67e3990296a"
    },
    {
      "ImportPath": "github.com/fatih/structs",
      "Rev": "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2"
    },
    {
      "ImportPath": "github.com/hashicorp/errwrap",
      "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
    },
    {
      "ImportPath": "github.com/hashicorp/go-cleanhttp",
      "Rev": "ad28ea4487f05916463e2423a55166280e8254b5"
    },
    {
      "ImportPath": "github.com/hashicorp/go-multierror",
      "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
    },
    {
      "ImportPath": "github.com/hashicorp/go-rootcerts",
      "Rev": "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/ast",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/parser",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/token",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/json/parser",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/json/scanner",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/json/token",
      "Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
    },
    {
      "ImportPath": "github.com/hashicorp/vault/api",
      "Comment": "v0.6.0-beta2-24-gf0b06cf",
      "Rev": "f0b06cf68a3a6d69d0572be23f5035c38d1f3539"
    },
    {
      "ImportPath": "github.com/mitchellh/go-homedir",
      "Rev": "981ab348d865cf048eb7d17e78ac7192632d8415"
    },
    {
      "ImportPath": "github.com/mitchellh/mapstructure",
      "Rev": "d2dd0262208475919e1a362f675cfc0e7c10e905"
    },
    {
      "ImportPath": "github.com/spf13/pflag",
      "Rev": "b084184666e02084b8ccb9b704bf0d79c466eb1d"
    },
    {
      "ImportPath": "golang.org/x/sys/unix",
      "Rev": "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
    },
    {
      "ImportPath": "gopkg.in/yaml.v2",
      "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
    }
  ]
}
Godeps/Readme (generated) — deleted, 5 lines
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
Gopkg.lock (generated, new file) — 266 lines added
@@ -0,0 +1,266 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  digest = "1:b8d8c5d40e832a08722167c33c29bf691aabc501f6d8632e107936bd93898af7"
  name = "github.com/Luzifer/go_helpers"
  packages = ["env"]
  pruneopts = "NUT"
  revision = "bbca4398656b348ce285438ca3dffb1fce6a3f4b"
  version = "v2.8.1"

[[projects]]
  digest = "1:f6cc072a289a686fda22819d871cd1b0407640141b2f6616dfbab957c96bf6c3"
  name = "github.com/Luzifer/rconfig"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5b80190bff90ccb9899db31e45baac7b1bede03b"
  version = "v2.2.0"

[[projects]]
  digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406"
  name = "github.com/Sirupsen/logrus"
  packages = ["."]
  pruneopts = "NUT"
  revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe"
  name = "github.com/golang/snappy"
  packages = ["."]
  pruneopts = "NUT"
  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"

[[projects]]
  digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19"
  name = "github.com/hashicorp/errwrap"
  packages = ["."]
  pruneopts = "NUT"
  revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
  version = "v1.0.0"

[[projects]]
  digest = "1:a5d940c38bf56f121721bfa747c66356df387cb9d5318c570c6d4170aab62862"
  name = "github.com/hashicorp/go-cleanhttp"
  packages = ["."]
  pruneopts = "NUT"
  revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18"
  version = "v0.5.0"

[[projects]]
  digest = "1:2ed138049ab373f696db2081ca48f15c5abdf20893803612a284f2bdce2bf443"
  name = "github.com/hashicorp/go-multierror"
  packages = ["."]
  pruneopts = "NUT"
  revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
  version = "v1.0.0"

[[projects]]
  digest = "1:f299bf12387ef9e1e36571851c4bb2c5024b5e66d16cfa77b220ad488b47d196"
  name = "github.com/hashicorp/go-retryablehttp"
  packages = ["."]
  pruneopts = "NUT"
  revision = "e651d75abec6fbd4f2c09508f72ae7af8a8b7171"

[[projects]]
  branch = "master"
  digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423"
  name = "github.com/hashicorp/go-rootcerts"
  packages = ["."]
  pruneopts = "NUT"
  revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"

[[projects]]
  branch = "master"
  digest = "1:ab128c55634eb166f6ab170896ac0f53979992250811071938d6bf2af7034690"
  name = "github.com/hashicorp/go-sockaddr"
  packages = ["."]
  pruneopts = "NUT"
  revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9"

[[projects]]
  branch = "master"
  digest = "1:a0cf0cebf33237e580ef4f7bcc3e8174b74e955ba563a658b876fdc4962c6278"
  name = "github.com/hashicorp/hcl"
  packages = [
    ".",
    "hcl/ast",
    "hcl/parser",
    "hcl/scanner",
    "hcl/strconv",
    "hcl/token",
    "json/parser",
    "json/scanner",
    "json/token",
  ]
  pruneopts = "NUT"
  revision = "65a6292f0157eff210d03ed1bf6c59b190b8b906"

[[projects]]
  digest = "1:0935050931cdcd69030f5649b1418f17b409b4b0f73bc882e1c346c86cb9586d"
  name = "github.com/hashicorp/vault"
  packages = [
    "api",
    "helper/compressutil",
    "helper/consts",
    "helper/hclutil",
    "helper/jsonutil",
    "helper/parseutil",
    "helper/strutil",
  ]
  pruneopts = "NUT"
  revision = "a59ffa4a0f09bbf198241fe6793a96722789b639"
  version = "v0.11.5"

[[projects]]
  digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
  name = "github.com/konsorten/go-windows-terminal-sequences"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
  version = "v1.0.1"

[[projects]]
  digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23"
  name = "github.com/mitchellh/go-homedir"
  packages = ["."]
  pruneopts = "NUT"
  revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
  version = "v1.0.0"

[[projects]]
  digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
  name = "github.com/mitchellh/mapstructure"
  packages = ["."]
  pruneopts = "NUT"
  revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
  version = "v1.1.2"

[[projects]]
  digest = "1:1d920dce8e11bfff65b5709e883a8ece131b63a5bc4b2cd404f9ef7eb445f73f"
  name = "github.com/pierrec/lz4"
  packages = [
    ".",
    "internal/xxh32",
  ]
  pruneopts = "NUT"
  revision = "635575b42742856941dbc767b44905bb9ba083f6"
  version = "v2.0.7"

[[projects]]
  digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
  name = "github.com/pkg/errors"
  packages = ["."]
  pruneopts = "NUT"
  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
  version = "v0.8.0"

[[projects]]
  branch = "master"
  digest = "1:09d61699d553a4e6ec998ad29816177b1f3d3ed0c18fe923d2c174ec065c99c8"
  name = "github.com/ryanuber/go-glob"
  packages = ["."]
  pruneopts = "NUT"
  revision = "256dc444b735e061061cf46c809487313d5b0065"

[[projects]]
  digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
  name = "github.com/spf13/pflag"
  packages = ["."]
  pruneopts = "NUT"
  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
  version = "v1.0.3"

[[projects]]
  branch = "master"
  digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  pruneopts = "NUT"
  revision = "eb0de9b17e854e9b1ccd9963efafc79862359959"

[[projects]]
  digest = "1:d59fa2f6b43207b64304b29530b4c9c7d2466c1406429ab620ab68e689a868ac"
  name = "golang.org/x/net"
  packages = [
    "context",
    "http/httpguts",
    "http2",
    "http2/hpack",
    "idna",
  ]
  pruneopts = "NUT"
  revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a"

[[projects]]
  branch = "master"
  digest = "1:6ddfd101211f81df3ba1f474baf1c451f7708f01c1e0c4be49cd9f0af03596cf"
  name = "golang.org/x/sys"
  packages = [
    "unix",
    "windows",
  ]
  pruneopts = "NUT"
  revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31"

[[projects]]
  digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
  name = "golang.org/x/text"
  packages = [
    "collate",
    "collate/build",
    "internal/colltab",
    "internal/gen",
    "internal/tag",
    "internal/triegen",
    "internal/ucd",
    "language",
    "secure/bidirule",
    "transform",
    "unicode/bidi",
    "unicode/cldr",
    "unicode/norm",
    "unicode/rangetable",
  ]
  pruneopts = "NUT"
  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
  version = "v0.3.0"

[[projects]]
  digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
  name = "golang.org/x/time"
  packages = ["rate"]
  pruneopts = "NUT"
  revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"

[[projects]]
  branch = "v2"
  digest = "1:1ab6db2d2bd353449c5d1e976ba7a92a0ece6e83aaab3e6674f8f2f1faebb85a"
  name = "gopkg.in/validator.v2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "135c24b11c19e52befcae2ec3fca5d9b78c4e98e"

[[projects]]
  digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = [
    "github.com/Luzifer/go_helpers/env",
    "github.com/Luzifer/rconfig",
    "github.com/Sirupsen/logrus",
    "github.com/hashicorp/vault/api",
    "github.com/mitchellh/go-homedir",
    "github.com/pkg/errors",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml (new file) — 55 lines added
@@ -0,0 +1,55 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true


[[constraint]]
  name = "github.com/Luzifer/go_helpers"
  version = "2.8.1"

[[constraint]]
  name = "github.com/Luzifer/rconfig"
  version = "2.2.0"

[[constraint]]
  name = "github.com/Sirupsen/logrus"
  version = "1.2.0"

[[constraint]]
  name = "github.com/hashicorp/vault"
  version = "0.11.5"

[[constraint]]
  name = "github.com/mitchellh/go-homedir"
  version = "1.0.0"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "0.8.0"

[prune]
  non-go = true
  go-tests = true
  unused-packages = true
vendor/github.com/Luzifer/go_helpers/LICENSE (generated, vendored, new file) — 202 lines added
@@ -0,0 +1,202 @@
[Unmodified Apache License, Version 2.0 text (January 2004, http://www.apache.org/licenses/), followed by the appendix notice: "Copyright 2016- Knut Ahlers <knut@ahlers.me>", licensed under the Apache License, Version 2.0, with a copy obtainable at http://www.apache.org/licenses/LICENSE-2.0.]
vendor/github.com/Luzifer/rconfig/.travis.yml (generated, vendored) — deleted, 8 lines
@@ -1,8 +0,0 @@
language: go

go:
  - 1.4
  - 1.5
  - tip

script: go test -v -race -cover ./...
vendor/github.com/Luzifer/rconfig/LICENSE (generated, vendored) — 207 lines changed
@@ -1,13 +1,202 @@
[The previous short notice ("Copyright 2015 Knut Ahlers <knut@ahlers.me>", licensed under the Apache License, Version 2.0, with a pointer to the license URL) is replaced by the full, unmodified Apache License 2.0 text plus the appendix notice "Copyright 2015- Knut Ahlers <knut@ahlers.me>". The closing paragraph — the link to http://www.apache.org/licenses/LICENSE-2.0 and the "AS IS", no-warranties disclaimer — is unchanged.]
vendor/github.com/Luzifer/rconfig/README.md (generated, vendored) — deleted, 94 lines
@@ -1,94 +0,0 @@
[![Build Status](https://travis-ci.org/Luzifer/rconfig.svg?branch=master)](https://travis-ci.org/Luzifer/rconfig)
[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0)
[![Documentation](https://badge.luzifer.io/v1/badge?title=godoc&text=reference)](https://godoc.org/github.com/Luzifer/rconfig)
[![Go Report](http://goreportcard.com/badge/Luzifer/rconfig)](http://goreportcard.com/report/Luzifer/rconfig)

## Description

> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and posix compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.

## Installation

Install by running:

```
go get -u github.com/Luzifer/rconfig
```

OR fetch a specific version:

```
go get -u gopkg.in/luzifer/rconfig.v1
```

Run tests by running:

```
go test -v -race -cover github.com/Luzifer/rconfig
```

## Usage

As a first step define a struct holding your configuration:

```go
type config struct {
  Username string `default:"unknown" flag:"user" description:"Your name"`
  Details  struct {
    Age int `default:"25" flag:"age" env:"age" description:"Your age"`
  }
}
```

Next create an instance of that struct and let `rconfig` fill that config:

```go
var cfg config
func init() {
  cfg = config{}
  rconfig.Parse(&cfg)
}
```

You're ready to access your configuration:

```go
func main() {
  fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
    cfg.Username,
    cfg.Details.Age)
}
```

### Provide variable defaults by using a file

Given you have a file `~/.myapp.yml` containing some secrets or usernames (for the example below username is assumed to be "luzifer") as a default configuration for your application you can use this source code to load the defaults from that file using the `vardefault` tag in your configuration struct.

The order of the directives (lower number = higher precedence):

1. Flags provided in command line
1. Environment variables
1. Variable defaults (`vardefault` tag in the struct)
1. `default` tag in the struct

```go
type config struct {
  Username string `vardefault:"username" flag:"username" description:"Your username"`
}

var cfg = config{}

func init() {
  rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
  rconfig.Parse(&cfg)
}

func main() {
  fmt.Printf("Username = %s", cfg.Username)
  // Output: Username = luzifer
}
```

## More info

You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.
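Editor's note: the README above loads variable defaults from a YAML file; the same defaults can be handed to `SetVariableDefaults` as a plain map (the function is part of the vendored package, see the config.go diff further below). A minimal sketch — the map contents and program layout are illustrative assumptions, not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/Luzifer/rconfig"
)

type config struct {
	Username string `vardefault:"username" flag:"username" description:"Your username"`
}

func main() {
	var cfg config
	// Equivalent to loading ~/.myapp.yml containing "username: luzifer"
	// via rconfig.VarDefaultsFromYAMLFile, but without touching the disk.
	rconfig.SetVariableDefaults(map[string]string{"username": "luzifer"})
	if err := rconfig.Parse(&cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("Username = %s\n", cfg.Username)
	// Output: Username = luzifer
}
```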
vendor/github.com/Luzifer/rconfig/autoenv.go (generated, vendored, new file) — 64 lines added
@@ -0,0 +1,64 @@
package rconfig

import "strings"

type characterClass [2]rune

func (c characterClass) Contains(r rune) bool {
	return c[0] <= r && c[1] >= r
}

type characterClasses []characterClass

func (c characterClasses) Contains(r rune) bool {
	for _, cc := range c {
		if cc.Contains(r) {
			return true
		}
	}
	return false
}

var (
	charGroupUpperLetter = characterClass{'A', 'Z'}
	charGroupLowerLetter = characterClass{'a', 'z'}
	charGroupNumber      = characterClass{'0', '9'}
	charGroupLowerNumber = characterClasses{charGroupLowerLetter, charGroupNumber}
)

func deriveEnvVarName(s string) string {
	var (
		words []string
		word  []rune
	)

	for _, l := range s {
		switch {
		case charGroupUpperLetter.Contains(l):
			if len(word) > 0 && charGroupLowerNumber.Contains(word[len(word)-1]) {
				words = append(words, string(word))
				word = []rune{}
			}
			word = append(word, l)

		case charGroupLowerLetter.Contains(l):
			if len(word) > 1 && charGroupUpperLetter.Contains(word[len(word)-1]) {
				words = append(words, string(word[0:len(word)-1]))
				word = word[len(word)-1:]
			}
			word = append(word, l)

		case charGroupNumber.Contains(l):
			word = append(word, l)

		default:
			if len(word) > 0 {
				words = append(words, string(word))
			}
			word = []rune{}
		}
	}
	words = append(words, string(word))

	return strings.ToUpper(strings.Join(words, "_"))
}
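Editor's note: `deriveEnvVarName` splits a Go field name into words and joins them upper-cased with underscores; it backs the new `AutoEnv` switch added in config.go below. A minimal usage sketch, assuming a consumer program — the `VaultToken` field and the flag name are illustrative, not taken from this commit:

```go
package main

import (
	"fmt"

	"github.com/Luzifer/rconfig"
)

type config struct {
	// No `env` struct tag: with AutoEnv enabled the variable name is
	// derived from the field name, so VaultToken is read from VAULT_TOKEN.
	VaultToken string `flag:"vault-token" description:"Token used to authenticate against Vault"`
}

func main() {
	var cfg config
	rconfig.AutoEnv(true) // enable automated env variable guessing
	if err := rconfig.Parse(&cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.VaultToken)
}
```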
vendor/github.com/Luzifer/rconfig/config.go (generated, vendored) — 172 lines changed
@@ -10,13 +10,31 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/spf13/pflag"
+	validator "gopkg.in/validator.v2"
 )
 
+type afterFunc func() error
+
 var (
+	autoEnv          bool
 	fs               *pflag.FlagSet
 	variableDefaults map[string]string
+
+	timeParserFormats = []string{
+		// Default constants
+		time.RFC3339Nano, time.RFC3339,
+		time.RFC1123Z, time.RFC1123,
+		time.RFC822Z, time.RFC822,
+		time.RFC850, time.RubyDate, time.UnixDate, time.ANSIC,
+		"2006-01-02 15:04:05.999999999 -0700 MST",
+		// More uncommon time formats
+		"2006-01-02 15:04:05", "2006-01-02 15:04:05Z07:00", // Simplified ISO time format
+		"01/02/2006 15:04:05", "01/02/2006 15:04:05Z07:00", // US time format
+		"02.01.2006 15:04:05", "02.01.2006 15:04:05Z07:00", // DE time format
+	}
 )
 
 func init() {
@@ -44,11 +62,32 @@ func Parse(config interface{}) error {
 	return parse(config, nil)
 }
 
+// ParseAndValidate works exactly like Parse but implements an additional run of
+// the go-validator package on the configuration struct. Therefore additonal struct
+// tags are supported like described in the readme file of the go-validator package:
+//
+// https://github.com/go-validator/validator/tree/v2#usage
+func ParseAndValidate(config interface{}) error {
+	return parseAndValidate(config, nil)
+}
+
 // Args returns the non-flag command-line arguments.
 func Args() []string {
 	return fs.Args()
 }
 
+// AddTimeParserFormats adds custom formats to parse time.Time fields
+func AddTimeParserFormats(f ...string) {
+	timeParserFormats = append(timeParserFormats, f...)
+}
+
+// AutoEnv enables or disables automated env variable guessing. If no `env` struct
+// tag was set and AutoEnv is enabled the env variable name is derived from the
+// name of the field: `MyFieldName` will get `MY_FIELD_NAME`
+func AutoEnv(enable bool) {
+	autoEnv = enable
+}
+
 // Usage prints a basic usage with the corresponding defaults for the flags to
 // os.Stdout. The defaults are derived from the `default` struct-tag and the ENV.
 func Usage() {
@@ -64,28 +103,51 @@ func SetVariableDefaults(defaults map[string]string) {
 	variableDefaults = defaults
 }
 
+func parseAndValidate(in interface{}, args []string) error {
+	if err := parse(in, args); err != nil {
+		return err
+	}
+
+	return validator.Validate(in)
+}
+
 func parse(in interface{}, args []string) error {
 	if args == nil {
 		args = os.Args
 	}
 
 	fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
-	if err := execTags(in, fs); err != nil {
+	afterFuncs, err := execTags(in, fs)
+	if err != nil {
 		return err
 	}
 
-	return fs.Parse(args)
+	if err := fs.Parse(args); err != nil {
+		return err
+	}
+
+	if afterFuncs != nil {
+		for _, f := range afterFuncs {
+			if err := f(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
 }
 
-func execTags(in interface{}, fs *pflag.FlagSet) error {
+func execTags(in interface{}, fs *pflag.FlagSet) ([]afterFunc, error) {
 	if reflect.TypeOf(in).Kind() != reflect.Ptr {
-		return errors.New("Calling parser with non-pointer")
+		return nil, errors.New("Calling parser with non-pointer")
 	}
 
 	if reflect.ValueOf(in).Elem().Kind() != reflect.Struct {
-		return errors.New("Calling parser with pointer to non-struct")
+		return nil, errors.New("Calling parser with pointer to non-struct")
 	}
 
+	afterFuncs := []afterFunc{}
+
 	st := reflect.ValueOf(in).Elem()
 	for i := 0; i < st.NumField(); i++ {
 		valField := st.Field(i)
@@ -97,9 +159,79 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 		}
 
 		value := varDefault(typeField.Tag.Get("vardefault"), typeField.Tag.Get("default"))
-		value = envDefault(typeField.Tag.Get("env"), value)
+		value = envDefault(typeField, value)
 		parts := strings.Split(typeField.Tag.Get("flag"), ",")
 
+		switch typeField.Type {
+		case reflect.TypeOf(time.Duration(0)):
+			v, err := time.ParseDuration(value)
+			if err != nil {
+				if value == "" {
+					v = time.Duration(0)
+				} else {
+					return nil, err
+				}
+			}
+
+			if typeField.Tag.Get("flag") != "" {
+				if len(parts) == 1 {
+					fs.DurationVar(valField.Addr().Interface().(*time.Duration), parts[0], v, typeField.Tag.Get("description"))
+				} else {
+					fs.DurationVarP(valField.Addr().Interface().(*time.Duration), parts[0], parts[1], v, typeField.Tag.Get("description"))
+				}
+			} else {
+				valField.Set(reflect.ValueOf(v))
+			}
+			continue
+
+		case reflect.TypeOf(time.Time{}):
+			var sVar string
+
+			if typeField.Tag.Get("flag") != "" {
+				if len(parts) == 1 {
+					fs.StringVar(&sVar, parts[0], value, typeField.Tag.Get("description"))
+				} else {
+					fs.StringVarP(&sVar, parts[0], parts[1], value, typeField.Tag.Get("description"))
+				}
+			} else {
+				sVar = value
+			}
+
+			afterFuncs = append(afterFuncs, func(valField reflect.Value, sVar *string) func() error {
+				return func() error {
+					if *sVar == "" {
+						// No time, no problem
+						return nil
+					}
+
+					// Check whether we could have a timestamp
+					if ts, err := strconv.ParseInt(*sVar, 10, 64); err == nil {
+						t := time.Unix(ts, 0)
+						valField.Set(reflect.ValueOf(t))
+						return nil
+					}
+
+					// We haven't so lets walk through possible time formats
+					matched := false
+					for _, tf := range timeParserFormats {
+						if t, err := time.Parse(tf, *sVar); err == nil {
+							matched = true
+							valField.Set(reflect.ValueOf(t))
+							return nil
+						}
+					}
+
+					if !matched {
+						return fmt.Errorf("Value %q did not match expected time formats", *sVar)
+					}
+
+					return nil
+				}
+			}(valField, &sVar))
+
+			continue
+		}
+
 		switch typeField.Type.Kind() {
 		case reflect.String:
 			if typeField.Tag.Get("flag") != "" {
@@ -130,7 +262,7 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 			if value == "" {
 				vt = 0
 			} else {
-				return err
+				return nil, err
 			}
 		}
 		if typeField.Tag.Get("flag") != "" {
@@ -145,7 +277,7 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 			if value == "" {
 				vt = 0
 			} else {
-				return err
+				return nil, err
 			}
 		}
 		if typeField.Tag.Get("flag") != "" {
@@ -160,7 +292,7 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 			if value == "" {
 				vt = 0.0
 			} else {
-				return err
+				return nil, err
 			}
 		}
 		if typeField.Tag.Get("flag") != "" {
@@ -170,9 +302,11 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 			}
 
 		case reflect.Struct:
-			if err := execTags(valField.Addr().Interface(), fs); err != nil {
-				return err
+			afs, err := execTags(valField.Addr().Interface(), fs)
+			if err != nil {
+				return nil, err
 			}
+			afterFuncs = append(afterFuncs, afs...)
 
 		case reflect.Slice:
 			switch typeField.Type.Elem().Kind() {
@@ -181,7 +315,7 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 				for _, v := range strings.Split(value, ",") {
 					it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64)
 					if err != nil {
-						return err
+						return nil, err
 					}
 					def = append(def, int(it))
 				}
@@ -195,7 +329,10 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 				if len(del) == 0 {
 					del = ","
 				}
-				def := strings.Split(value, del)
+				var def = []string{}
+				if value != "" {
+					def = strings.Split(value, del)
+				}
 				if len(parts) == 1 {
 					fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description"))
 				} else {
@@ -205,7 +342,7 @@ func execTags(in interface{}, fs *pflag.FlagSet) error {
 		}
 	}
 
-	return nil
+	return afterFuncs, nil
 }
 
 func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) {
@@ -289,9 +426,14 @@ func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, part
 	}
 }
 
-func envDefault(env, def string) string {
+func envDefault(field reflect.StructField, def string) string {
 	value := def
 
+	env := field.Tag.Get("env")
+	if env == "" && autoEnv {
+		env = deriveEnvVarName(field.Name)
+	}
|
|
||||||
if env != "" {
|
if env != "" {
|
||||||
if e := os.Getenv(env); e != "" {
|
if e := os.Getenv(env); e != "" {
|
||||||
value = e
|
value = e
|
||||||
|
|
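The updated execTags above now handles `time.Duration` and `time.Time` fields via struct tags and returns a list of afterFuncs instead of a bare error. A minimal sketch of a consumer struct using these tags; the field names and defaults are illustrative, and the `rconfig.Parse` entry point is assumed rather than shown in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Luzifer/rconfig"
)

// config uses the tag names handled by execTags above: "flag" (with an
// optional shorthand after the comma), "env", "default", and "description".
// The time.Duration branch added in this diff parses the default / env value
// with time.ParseDuration before registering the flag.
type config struct {
	VaultAddr string        `flag:"vault-addr" env:"VAULT_ADDR" default:"https://127.0.0.1:8200" description:"Vault API address"`
	Timeout   time.Duration `flag:"timeout,t" default:"30s" description:"Request timeout"`
	Keys      []string      `flag:"keys" default:"" description:"Keys to read"`
}

func main() {
	cfg := config{}
	// Assumed exported entry point that walks the struct tags as execTags does.
	if err := rconfig.Parse(&cfg); err != nil {
		fmt.Println("config error:", err)
		return
	}
	fmt.Printf("addr=%s timeout=%s keys=%v\n", cfg.VaultAddr, cfg.Timeout, cfg.Keys)
}
```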
1 vendor/github.com/Sirupsen/logrus/.gitignore generated vendored
@@ -1 +0,0 @@
-logrus
10 vendor/github.com/Sirupsen/logrus/.travis.yml generated vendored
@@ -1,10 +0,0 @@
-language: go
-go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-install:
-  - go get -t ./...
-script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
66 vendor/github.com/Sirupsen/logrus/CHANGELOG.md generated vendored
@@ -1,66 +0,0 @@
-# 0.10.0
-
-* feature: Add a test hook (#180)
-* feature: `ParseLevel` is now case-insensitive (#326)
-* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
-* performance: avoid re-allocations on `WithFields` (#335)
-
-# 0.9.0
-
-* logrus/text_formatter: don't emit empty msg
-* logrus/hooks/airbrake: move out of main repository
-* logrus/hooks/sentry: move out of main repository
-* logrus/hooks/papertrail: move out of main repository
-* logrus/hooks/bugsnag: move out of main repository
-* logrus/core: run tests with `-race`
-* logrus/core: detect TTY based on `stderr`
-* logrus/core: support `WithError` on logger
-* logrus/core: Solaris support
-
-# 0.8.7
-
-* logrus/core: fix possible race (#216)
-* logrus/doc: small typo fixes and doc improvements
-
-# 0.8.6
-
-* hooks/raven: allow passing an initialized client
-
-# 0.8.5
-
-* logrus/core: revert #208
-
-# 0.8.4
-
-* formatter/text: fix data race (#218)
-
-# 0.8.3
-
-* logrus/core: fix entry log level (#208)
-* logrus/core: improve performance of text formatter by 40%
-* logrus/core: expose `LevelHooks` type
-* logrus/core: add support for DragonflyBSD and NetBSD
-* formatter/text: print structs more verbosely
-
-# 0.8.2
-
-* logrus: fix more Fatal family functions
-
-# 0.8.1
-
-* logrus: fix not exiting on `Fatalf` and `Fatalln`
-
-# 0.8.0
-
-* logrus: defaults to stderr instead of stdout
-* hooks/sentry: add special field for `*http.Request`
-* formatter/text: ignore Windows for colors
-
-# 0.7.3
-
-* formatter/\*: allow configuration of timestamp layout
-
-# 0.7.2
-
-* formatter/text: Add configuration option for time format (#158)
421 vendor/github.com/Sirupsen/logrus/README.md generated vendored
@@ -1,421 +0,0 @@
[The vendored Logrus README.md (project introduction, usage examples, hook and formatter tables, level/testing/rotation notes) is deleted in full by this commit; its 421 removed lines are omitted here.]
2 vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored
@@ -1,7 +1,7 @@
 package logrus
 
 // The following code was sourced and modified from the
-// https://bitbucket.org/tebeka/atexit package governed by the following license:
+// https://github.com/tebeka/atexit package governed by the following license:
 //
 // Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
 //
4 vendor/github.com/Sirupsen/logrus/doc.go generated vendored
@@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
   package main
 
   import (
-    log "github.com/Sirupsen/logrus"
+    log "github.com/sirupsen/logrus"
   )
 
   func main() {
@@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:
 Output:
   time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
 
-For a full guide visit https://github.com/Sirupsen/logrus
+For a full guide visit https://github.com/sirupsen/logrus
 */
 package logrus
231 vendor/github.com/Sirupsen/logrus/entry.go generated vendored
@@ -4,11 +4,30 @@ import (
 	"bytes"
 	"fmt"
 	"os"
+	"reflect"
+	"runtime"
+	"strings"
 	"sync"
 	"time"
 )
 
-var bufferPool *sync.Pool
+var (
+	bufferPool *sync.Pool
+
+	// qualified package name, cached at first use
+	logrusPackage string
+
+	// Positions in the call stack when tracing to report the calling method
+	minimumCallerDepth int
+
+	// Used for caller information initialisation
+	callerInitOnce sync.Once
+)
+
+const (
+	maximumCallerDepth int = 25
+	knownLogrusFrames  int = 4
+)
 
 func init() {
 	bufferPool = &sync.Pool{
@@ -16,15 +35,18 @@ func init() {
 			return new(bytes.Buffer)
 		},
 	}
+
+	// start at the bottom of the stack before the package-name cache is primed
+	minimumCallerDepth = 1
 }
 
 // Defines the key when adding errors using WithError.
 var ErrorKey = "error"
 
 // An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
-// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
-// passed around as much as you wish to avoid field duplication.
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
 type Entry struct {
 	Logger *Logger
 
@@ -34,21 +56,28 @@ type Entry struct {
 	// Time at which the log entry was created
 	Time time.Time
 
-	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
 	Level Level
 
-	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	// Calling method, with package name
+	Caller *runtime.Frame
+
+	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
 	Message string
 
-	// When formatter is called in entry.log(), an Buffer may be set to entry
+	// When formatter is called in entry.log(), a Buffer may be set to entry
 	Buffer *bytes.Buffer
+
+	// err may contain a field formatting error
+	err string
 }
 
 func NewEntry(logger *Logger) *Entry {
 	return &Entry{
 		Logger: logger,
-		// Default is three fields, give a little extra room
-		Data: make(Fields, 5),
+		// Default is three fields, plus one optional. Give a little extra room.
+		Data: make(Fields, 6),
 	}
 }
 
@@ -79,43 +108,106 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
 	for k, v := range entry.Data {
 		data[k] = v
 	}
+	var field_err string
 	for k, v := range fields {
-		data[k] = v
+		if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
+			field_err = fmt.Sprintf("can not add field %q", k)
+			if entry.err != "" {
+				field_err = entry.err + ", " + field_err
+			}
+		} else {
+			data[k] = v
+		}
 	}
-	return &Entry{Logger: entry.Logger, Data: data}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be to be a better way...
+func getPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+
+	return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+	// Restrict the lookback frames to avoid runaway lookups
+	pcs := make([]uintptr, maximumCallerDepth)
+	depth := runtime.Callers(minimumCallerDepth, pcs)
+	frames := runtime.CallersFrames(pcs[:depth])
+
+	// cache this package's fully-qualified name
+	callerInitOnce.Do(func() {
+		logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
+
+		// now that we have the cache, we can skip a minimum count of known-logrus functions
+		// XXX this is dubious, the number of frames may vary store an entry in a logger interface
+		minimumCallerDepth = knownLogrusFrames
+	})
+
+	for f, again := frames.Next(); again; f, again = frames.Next() {
+		pkg := getPackageName(f.Function)
+
+		// If the caller isn't part of this package, we're done
+		if pkg != logrusPackage {
+			return &f
+		}
+	}
+
+	// if we got here, we failed to find the caller's context
+	return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+	return entry.Logger != nil &&
+		entry.Logger.ReportCaller &&
+		entry.Caller != nil
 }
 
 // This function is not declared with a pointer value because otherwise
 // race conditions will occur when using multiple goroutines
 func (entry Entry) log(level Level, msg string) {
 	var buffer *bytes.Buffer
+
+	// Default to now, but allow users to override if they want.
+	//
+	// We don't have to worry about polluting future calls to Entry#log()
+	// with this assignment because this function is declared with a
+	// non-pointer receiver.
+	if entry.Time.IsZero() {
 		entry.Time = time.Now()
+	}
+
 	entry.Level = level
 	entry.Message = msg
-	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
-		entry.Logger.mu.Lock()
-		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
-		entry.Logger.mu.Unlock()
+	if entry.Logger.ReportCaller {
+		entry.Caller = getCaller()
 	}
 
+	entry.fireHooks()
+
 	buffer = bufferPool.Get().(*bytes.Buffer)
 	buffer.Reset()
 	defer bufferPool.Put(buffer)
 	entry.Buffer = buffer
-	serialized, err := entry.Logger.Formatter.Format(&entry)
+
+	entry.write()
+
 	entry.Buffer = nil
-	if err != nil {
-		entry.Logger.mu.Lock()
-		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
-		entry.Logger.mu.Unlock()
-	} else {
-		entry.Logger.mu.Lock()
-		_, err = entry.Logger.Out.Write(serialized)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
-		}
-		entry.Logger.mu.Unlock()
-	}
 
 	// To avoid Entry#log() returning a value that only would make sense for
 	// panic() to use in Entry#Panic(), we avoid the allocation by checking
@@ -125,8 +217,37 @@ func (entry Entry) log(level Level, msg string) {
 	}
 }
 
+func (entry *Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+	}
+}
+
+func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(TraceLevel) {
+		entry.log(TraceLevel, fmt.Sprint(args...))
+	}
+}
+
 func (entry *Entry) Debug(args ...interface{}) {
-	if entry.Logger.Level >= DebugLevel {
+	if entry.Logger.IsLevelEnabled(DebugLevel) {
 		entry.log(DebugLevel, fmt.Sprint(args...))
 	}
 }
@@ -136,13 +257,13 @@ func (entry *Entry) Print(args ...interface{}) {
 }
 
 func (entry *Entry) Info(args ...interface{}) {
-	if entry.Logger.Level >= InfoLevel {
+	if entry.Logger.IsLevelEnabled(InfoLevel) {
 		entry.log(InfoLevel, fmt.Sprint(args...))
 	}
 }
 
 func (entry *Entry) Warn(args ...interface{}) {
-	if entry.Logger.Level >= WarnLevel {
+	if entry.Logger.IsLevelEnabled(WarnLevel) {
 		entry.log(WarnLevel, fmt.Sprint(args...))
 	}
 }
@@ -152,20 +273,20 @@ func (entry *Entry) Warning(args ...interface{}) {
 }
 
 func (entry *Entry) Error(args ...interface{}) {
-	if entry.Logger.Level >= ErrorLevel {
+	if entry.Logger.IsLevelEnabled(ErrorLevel) {
 		entry.log(ErrorLevel, fmt.Sprint(args...))
 	}
 }
 
 func (entry *Entry) Fatal(args ...interface{}) {
-	if entry.Logger.Level >= FatalLevel {
+	if entry.Logger.IsLevelEnabled(FatalLevel) {
 		entry.log(FatalLevel, fmt.Sprint(args...))
 	}
-	Exit(1)
+	entry.Logger.Exit(1)
 }
 
 func (entry *Entry) Panic(args ...interface{}) {
-	if entry.Logger.Level >= PanicLevel {
+	if entry.Logger.IsLevelEnabled(PanicLevel) {
 		entry.log(PanicLevel, fmt.Sprint(args...))
 	}
 	panic(fmt.Sprint(args...))
@@ -173,14 +294,20 @@ func (entry *Entry) Panic(args ...interface{}) {
 
 // Entry Printf family functions
 
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(TraceLevel) {
+		entry.Trace(fmt.Sprintf(format, args...))
+	}
+}
+
 func (entry *Entry) Debugf(format string, args ...interface{}) {
-	if entry.Logger.Level >= DebugLevel {
+	if entry.Logger.IsLevelEnabled(DebugLevel) {
 		entry.Debug(fmt.Sprintf(format, args...))
 	}
 }
 
 func (entry *Entry) Infof(format string, args ...interface{}) {
-	if entry.Logger.Level >= InfoLevel {
+	if entry.Logger.IsLevelEnabled(InfoLevel) {
 		entry.Info(fmt.Sprintf(format, args...))
 	}
 }
@@ -190,7 +317,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
 }
 
 func (entry *Entry) Warnf(format string, args ...interface{}) {
-	if entry.Logger.Level >= WarnLevel {
+	if entry.Logger.IsLevelEnabled(WarnLevel) {
 		entry.Warn(fmt.Sprintf(format, args...))
 	}
 }
@@ -200,34 +327,40 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
 }
 
 func (entry *Entry) Errorf(format string, args ...interface{}) {
-	if entry.Logger.Level >= ErrorLevel {
+	if entry.Logger.IsLevelEnabled(ErrorLevel) {
 		entry.Error(fmt.Sprintf(format, args...))
 	}
 }
 
 func (entry *Entry) Fatalf(format string, args ...interface{}) {
-	if entry.Logger.Level >= FatalLevel {
+	if entry.Logger.IsLevelEnabled(FatalLevel) {
 		entry.Fatal(fmt.Sprintf(format, args...))
 	}
-	Exit(1)
+	entry.Logger.Exit(1)
 }
 
 func (entry *Entry) Panicf(format string, args ...interface{}) {
-	if entry.Logger.Level >= PanicLevel {
+	if entry.Logger.IsLevelEnabled(PanicLevel) {
 		entry.Panic(fmt.Sprintf(format, args...))
 	}
 }
 
 // Entry Println family functions
 
+func (entry *Entry) Traceln(args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(TraceLevel) {
+		entry.Trace(entry.sprintlnn(args...))
+	}
+}
+
 func (entry *Entry) Debugln(args ...interface{}) {
-	if entry.Logger.Level >= DebugLevel {
+	if entry.Logger.IsLevelEnabled(DebugLevel) {
 		entry.Debug(entry.sprintlnn(args...))
 	}
 }
 
 func (entry *Entry) Infoln(args ...interface{}) {
-	if entry.Logger.Level >= InfoLevel {
+	if entry.Logger.IsLevelEnabled(InfoLevel) {
 		entry.Info(entry.sprintlnn(args...))
 	}
 }
@@ -237,7 +370,7 @@ func (entry *Entry) Println(args ...interface{}) {
 }
 
 func (entry *Entry) Warnln(args ...interface{}) {
-	if entry.Logger.Level >= WarnLevel {
+	if entry.Logger.IsLevelEnabled(WarnLevel) {
 		entry.Warn(entry.sprintlnn(args...))
 	}
 }
@@ -247,20 +380,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
 }
 
 func (entry *Entry) Errorln(args ...interface{}) {
-	if entry.Logger.Level >= ErrorLevel {
+	if entry.Logger.IsLevelEnabled(ErrorLevel) {
 		entry.Error(entry.sprintlnn(args...))
 	}
 }
 
 func (entry *Entry) Fatalln(args ...interface{}) {
-	if entry.Logger.Level >= FatalLevel {
+	if entry.Logger.IsLevelEnabled(FatalLevel) {
 		entry.Fatal(entry.sprintlnn(args...))
 	}
-	Exit(1)
+	entry.Logger.Exit(1)
 }
 
 func (entry *Entry) Panicln(args ...interface{}) {
-	if entry.Logger.Level >= PanicLevel {
+	if entry.Logger.IsLevelEnabled(PanicLevel) {
 		entry.Panic(entry.sprintlnn(args...))
 	}
 }
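The entry.go changes above add a Trace level and an `Entry.WithTime` override that replaces the unconditional `time.Now()` default. A minimal usage sketch against the updated API; it assumes the lowercase `github.com/sirupsen/logrus` import path recommended in the doc.go hunk, and the fixed timestamp is illustrative:

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// TraceLevel and Entry.WithTime are part of the vendored update above.
	log.SetLevel(log.TraceLevel)

	// WithTime overrides the entry's timestamp; Entry.log() only falls back
	// to time.Now() when the time is still zero.
	log.WithField("component", "example").
		WithTime(time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)).
		Trace("replayed event")
}
```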
62 vendor/github.com/Sirupsen/logrus/exported.go generated vendored
@@ -2,6 +2,7 @@ package logrus
 
 import (
 	"io"
+	"time"
 )
 
 var (
@@ -15,37 +16,38 @@ func StandardLogger() *Logger {
 
 // SetOutput sets the standard logger output.
 func SetOutput(out io.Writer) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Out = out
+	std.SetOutput(out)
 }
 
 // SetFormatter sets the standard logger formatter.
 func SetFormatter(formatter Formatter) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Formatter = formatter
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
 }
 
 // SetLevel sets the standard logger level.
 func SetLevel(level Level) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Level = level
+	std.SetLevel(level)
 }
 
 // GetLevel returns the standard logger level.
 func GetLevel() Level {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	return std.Level
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
 }
 
 // AddHook adds a hook to the standard logger hooks.
 func AddHook(hook Hook) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Hooks.Add(hook)
+	std.AddHook(hook)
 }
 
 // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
@@ -72,6 +74,20 @@ func WithFields(fields Fields) *Entry {
 	return std.WithFields(fields)
 }
 
+// WithTime creats an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+	return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+	std.Trace(args...)
+}
+
 // Debug logs a message at level Debug on the standard logger.
 func Debug(args ...interface{}) {
 	std.Debug(args...)
@@ -107,11 +123,16 @@ func Panic(args ...interface{}) {
 	std.Panic(args...)
 }
 
-// Fatal logs a message at level Fatal on the standard logger.
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
 	std.Fatal(args...)
 }
 
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+	std.Tracef(format, args...)
+}
+
 // Debugf logs a message at level Debug on the standard logger.
 func Debugf(format string, args ...interface{}) {
 	std.Debugf(format, args...)
@@ -147,11 +168,16 @@ func Panicf(format string, args ...interface{}) {
 	std.Panicf(format, args...)
 }
 
-// Fatalf logs a message at level Fatal on the standard logger.
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatalf(format string, args ...interface{}) {
 	std.Fatalf(format, args...)
 }
 
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
 // Debugln logs a message at level Debug on the standard logger.
 func Debugln(args ...interface{}) {
 	std.Debugln(args...)
@@ -187,7 +213,7 @@ func Panicln(args ...interface{}) {
 	std.Panicln(args...)
 }
 
-// Fatalln logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatalln(args ...interface{}) {
 	std.Fatalln(args...)
 }
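With the exported.go changes above, every package-level helper now delegates to the std logger, and `SetReportCaller` and `IsLevelEnabled` become available at package level. A short sketch of those two helpers, again assuming the lowercase import path:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// SetReportCaller and IsLevelEnabled are the new package-level helpers
	// shown in the exported.go hunks; both simply forward to the std logger.
	log.SetReportCaller(true)

	if log.IsLevelEnabled(log.DebugLevel) {
		log.Debug("debug logging is active")
	}

	log.Info("caller information is now attached to each entry")
}
```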
vendor/github.com/Sirupsen/logrus/formatter.go (generated, vendored, 51 changed lines)

@@ -2,7 +2,16 @@ package logrus

 import "time"

-const DefaultTimestampFormat = time.RFC3339
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)

 // The Formatter interface is used to implement a custom Formatter. It takes an
 // `Entry`. It exposes all the fields, including the default ones:

@@ -18,7 +27,7 @@ type Formatter interface {
 	Format(*Entry) ([]byte, error)
 }

-// This is to not silently overwrite `time`, `msg` and `level` fields when
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
 // dumping it. If this code wasn't there doing:
 //
 // logrus.WithField("level", 1).Info("hello")

@@ -30,16 +39,40 @@ type Formatter interface {
 //
 // It's not exported because it's still using Data in an opinionated way. It's to
 // avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
-	if t, ok := data["time"]; ok {
-		data["fields.time"] = t
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
 	}

-	if m, ok := data["msg"]; ok {
-		data["fields.msg"] = m
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
 	}

-	if l, ok := data["level"]; ok {
-		data["fields.level"] = l
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
 	}
 }
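Because `prefixFieldClashes` now resolves the default key names through a `FieldMap`, user fields that collide with those keys are still moved under a `fields.` prefix rather than overwriting the built-ins. A small usage sketch of that behaviour, assuming the stock formatters wired as in this diff; the logged field and the output shown in the comment are illustrative:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{})

	// "msg" clashes with the default message key, so the formatter
	// emits it as "fields.msg" instead of overwriting the log message.
	log.WithField("msg", "user supplied value").Info("hello")
	// => {"fields.msg":"user supplied value","level":"info","msg":"hello","time":"..."}
}
```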
vendor/github.com/Sirupsen/logrus/json_formatter.go (generated, vendored, 84 changed lines)

@@ -1,41 +1,105 @@
 package logrus

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 )

+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable json
 type JSONFormatter struct {
 	// TimestampFormat sets the format used for marshaling timestamps.
 	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime:  "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg:   "@message",
+	// 		 FieldKeyFunc:  "@caller",
+	//    },
+	// }
+	FieldMap FieldMap
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
 }

+// Format renders a single log entry
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
-	data := make(Fields, len(entry.Data)+3)
+	data := make(Fields, len(entry.Data)+4)
 	for k, v := range entry.Data {
 		switch v := v.(type) {
 		case error:
 			// Otherwise errors are ignored by `encoding/json`
-			// https://github.com/Sirupsen/logrus/issues/137
+			// https://github.com/sirupsen/logrus/issues/137
 			data[k] = v.Error()
 		default:
 			data[k] = v
 		}
 	}
-	prefixFieldClashes(data)
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())

 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
-		timestampFormat = DefaultTimestampFormat
+		timestampFormat = defaultTimestampFormat
 	}

-	data["time"] = entry.Time.Format(timestampFormat)
-	data["msg"] = entry.Message
-	data["level"] = entry.Level.String()
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
+		data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+	}

-	serialized, err := json.Marshal(data)
-	if err != nil {
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
 		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
 	}
-	return append(serialized, '\n'), nil
+
+	return b.Bytes(), nil
 }
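As a usage sketch (not part of the diff), the `DataKey`, `FieldMap` and `PrettyPrint` options added above might be combined like this; the `@timestamp`-style names are only examples:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{
		// Nest user-supplied fields under "data" instead of mixing them
		// with the default keys.
		DataKey: "data",
		// Rename the default keys, e.g. for an ELK-style pipeline.
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
		// Indent the JSON output while debugging locally.
		PrettyPrint: true,
	})

	log.WithField("secret_path", "secret/app").Info("token renewed")
}
```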
vendor/github.com/Sirupsen/logrus/logger.go (generated, vendored, 161 changed lines)

@@ -4,12 +4,14 @@ import (
 	"io"
 	"os"
 	"sync"
+	"sync/atomic"
+	"time"
 )

 type Logger struct {
 	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
 	// file, or leave it default which is `os.Stderr`. You can also set this to
-	// something more adventorous, such as logging to Kafka.
+	// something more adventurous, such as logging to Kafka.
 	Out io.Writer
 	// Hooks for the logger instance. These allow firing events based on logging
 	// levels and log entries. For example, to send errors to an error tracking

@@ -22,16 +24,24 @@ type Logger struct {
 	// own that implements the `Formatter` interface, see the `README` or included
 	// formatters for examples.
 	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
 	// The logging level the logger should log at. This is typically (and defaults
 	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
-	// logged. `logrus.Debug` is useful in
+	// logged.
 	Level Level
 	// Used to sync writing to the log. Locking is enabled by Default
 	mu MutexWrap
 	// Reusable empty entry
 	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
 }

+type exitFunc func(int)
+
 type MutexWrap struct {
 	lock     sync.Mutex
 	disabled bool

@@ -71,6 +81,8 @@ func New() *Logger {
 		Formatter: new(TextFormatter),
 		Hooks:     make(LevelHooks),
 		Level:     InfoLevel,
+		ExitFunc:  os.Exit,
+		ReportCaller: false,
 	}
 }

@@ -83,11 +95,12 @@ func (logger *Logger) newEntry() *Entry {
 }

 func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
 	logger.entryPool.Put(entry)
 }

 // Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
 // If you want multiple fields, use `WithFields`.
 func (logger *Logger) WithField(key string, value interface{}) *Entry {
 	entry := logger.newEntry()

@@ -111,8 +124,23 @@ func (logger *Logger) WithError(err error) *Entry {
 	return entry.WithError(err)
 }

+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	if logger.IsLevelEnabled(TraceLevel) {
+		entry := logger.newEntry()
+		entry.Tracef(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
 func (logger *Logger) Debugf(format string, args ...interface{}) {
-	if logger.Level >= DebugLevel {
+	if logger.IsLevelEnabled(DebugLevel) {
 		entry := logger.newEntry()
 		entry.Debugf(format, args...)
 		logger.releaseEntry(entry)

(The same `logger.Level >= X` to `logger.IsLevelEnabled(X)` guard change is applied to all remaining Xf, X and Xln methods for Info, Warn, Warning, Error, Fatal and Panic, and Trace/Tracef/Traceln variants are added alongside them.)

@@ -284,25 +328,88 @@ func (logger *Logger) Errorln(args ...interface{}) {
 func (logger *Logger) Fatalln(args ...interface{}) {
-	if logger.Level >= FatalLevel {
+	if logger.IsLevelEnabled(FatalLevel) {
 		entry := logger.newEntry()
 		entry.Fatalln(args...)
 		logger.releaseEntry(entry)
 	}
-	Exit(1)
+	logger.Exit(1)
 }

 func (logger *Logger) Panicln(args ...interface{}) {
-	if logger.Level >= PanicLevel {
+	if logger.IsLevelEnabled(PanicLevel) {
 		entry := logger.newEntry()
 		entry.Panicln(args...)
 		logger.releaseEntry(entry)
 	}
 }

+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
 //When file is opened with appending mode, it's safe to
 //write concurrently to a file (within 4k message on Linux).
 //In these cases user can choose to disable the lock.
 func (logger *Logger) SetNoLock() {
 	logger.mu.Disable()
 }

+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
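A short sketch of the new level handling and exit hook on `*Logger`; redirecting `ExitFunc` as shown here is an assumption about how a caller might use it (for example in tests), not something this commit's own code does:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetLevel(log.DebugLevel) // stored atomically in the new code
	logger.SetOutput(os.Stdout)

	if logger.IsLevelEnabled(log.DebugLevel) {
		logger.Debug("verbose diagnostics enabled")
	}

	// For tests, Fatal can be redirected instead of terminating the process.
	logger.ExitFunc = func(code int) {
		logger.Warnf("would have exited with status %d", code)
	}
	logger.Fatal("simulated fatal error")
}
```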
vendor/github.com/Sirupsen/logrus/logrus.go (generated, vendored, 39 changed lines)

@@ -10,11 +10,13 @@ import (
 type Fields map[string]interface{}

 // Level type
-type Level uint8
+type Level uint32

 // Convert the Level to a string. E.g. PanicLevel becomes "panic".
 func (level Level) String() string {
 	switch level {
+	case TraceLevel:
+		return "trace"
 	case DebugLevel:
 		return "debug"
 	case InfoLevel:

@@ -47,12 +49,26 @@ func ParseLevel(lvl string) (Level, error) {
 		return InfoLevel, nil
 	case "debug":
 		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
 	}

 	var l Level
 	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
 }

+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = Level(l)
+
+	return nil
+}
+
 // A constant exposing all logging levels
 var AllLevels = []Level{
 	PanicLevel,

@@ -61,6 +77,7 @@ var AllLevels = []Level{
 	WarnLevel,
 	InfoLevel,
 	DebugLevel,
+	TraceLevel,
 }

@@ -69,7 +86,7 @@ const (
 	// PanicLevel level, highest level of severity. Logs and then calls panic with the
 	// message passed to Debug, Info, ...
 	PanicLevel Level = iota
-	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
 	// logging level is set to Panic.
 	FatalLevel
 	// ErrorLevel level. Logs. Used for errors that should definitely be noted.

@@ -82,6 +99,8 @@ const (
 	InfoLevel
 	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
 	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than the Debug.
+	TraceLevel
 )

 // Won't compile if StdLogger can't be realized by a log.Logger

@@ -140,4 +159,20 @@ type FieldLogger interface {
 	Errorln(args ...interface{})
 	Fatalln(args ...interface{})
 	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistancy. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
 }
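Since `Level` now satisfies `encoding.TextUnmarshaler`, a level can be decoded straight from configuration text. A minimal sketch; the `Config` struct and the JSON input are purely illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	log "github.com/Sirupsen/logrus"
)

// Config is a hypothetical application config carrying a log level.
type Config struct {
	LogLevel log.Level `json:"log_level"`
}

func main() {
	var cfg Config
	// UnmarshalText is invoked for the quoted "trace" value.
	if err := json.Unmarshal([]byte(`{"log_level": "trace"}`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.LogLevel) // prints "trace" via Level.String()
}
```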
vendor/github.com/Sirupsen/logrus/terminal_appengine.go (generated, vendored, deleted, 8 lines)

@@ -1,8 +0,0 @@
-// +build appengine
-
-package logrus
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-	return true
-}
vendor/github.com/Sirupsen/logrus/terminal_bsd.go (generated, vendored, deleted, 10 lines)

@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go (generated, vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
vendor/github.com/Sirupsen/logrus/terminal_check_js.go (generated, vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
+// +build js
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go (generated, vendored, new file, 19 lines)

@@ -0,0 +1,19 @@
+// +build !appengine,!js,!windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return terminal.IsTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
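The same `golang.org/x/crypto/ssh/terminal` check used by the new file above can also be applied in application code, for instance to force colored output only when stdout really is a TTY; a hedged sketch, not part of the vendored code:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		// Only force colors when stdout is attached to a terminal.
		ForceColors: terminal.IsTerminal(int(os.Stdout.Fd())),
	})
	log.Info("formatter configured")
}
```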
vendor/github.com/Sirupsen/logrus/terminal_check_windows.go (generated, vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		var mode uint32
+		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+		return err == nil
+	default:
+		return false
+	}
+}
vendor/github.com/Sirupsen/logrus/terminal_notwindows.go (generated, vendored, 20 changed lines)

@@ -1,22 +1,8 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
+// +build !windows

 package logrus

-import (
-	"syscall"
-	"unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-	fd := syscall.Stderr
-	var termios Termios
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-	return err == 0
-}
+import "io"
+
+func initTerminal(w io.Writer) {
+}
vendor/github.com/Sirupsen/logrus/terminal_solaris.go (generated, vendored, deleted, 15 lines)

@@ -1,15 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
-	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
-	return err == nil
-}
vendor/github.com/Sirupsen/logrus/terminal_windows.go (generated, vendored, 29 changed lines)

@@ -1,27 +1,18 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
+// +build !appengine,!js,windows

 package logrus

 import (
+	"io"
+	"os"
 	"syscall"
-	"unsafe"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
 )

-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
-	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-	fd := syscall.Stderr
-	var st uint32
-	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
-	return r != 0 && e == 0
+func initTerminal(w io.Writer) {
+	switch v := w.(type) {
+	case *os.File:
+		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+	}
 }
vendor/github.com/Sirupsen/logrus/text_formatter.go (generated, vendored, 214 changed lines)

@@ -3,9 +3,10 @@ package logrus
 import (
 	"bytes"
 	"fmt"
-	"runtime"
+	"os"
 	"sort"
 	"strings"
+	"sync"
 	"time"
 )

@@ -14,24 +15,20 @@ const (
 	red    = 31
 	green  = 32
 	yellow = 33
-	blue   = 34
+	blue   = 36
 	gray   = 37
 )

 var (
 	baseTimestamp time.Time
-	isTerminal    bool
+	emptyFieldMap FieldMap
 )

 func init() {
 	baseTimestamp = time.Now()
-	isTerminal = IsTerminal()
-}
-
-func miniTS() int {
-	return int(time.Since(baseTimestamp) / time.Second)
 }

+// TextFormatter formats logs into text
 type TextFormatter struct {
 	// Set to true to bypass checking for a TTY before outputting colors.
 	ForceColors bool

@@ -39,6 +36,9 @@ type TextFormatter struct {
 	// Force disabling colors.
 	DisableColors bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool
+
 	// Disable timestamp logging. useful when output is redirected to logging
 	// system that already adds timestamps.
 	DisableTimestamp bool

@@ -54,45 +54,133 @@ type TextFormatter struct {
 	// that log extremely frequently and don't use the JSON formatter this may not
 	// be desired.
 	DisableSorting bool
+
+	// The keys sorting function, when uninitialized it uses sort.Strings.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's out is to a terminal
+	isTerminal bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	terminalInitOnce sync.Once
 }

+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+
+		if f.isTerminal {
+			initTerminal(entry.Logger.Out)
+		}
+	}
+}
+
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || f.isTerminal
+
+	if f.EnvironmentOverrideColors {
+		if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+			isColored = true
+		} else if ok && force == "0" {
+			isColored = false
+		} else if os.Getenv("CLICOLOR") == "0" {
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-	var b *bytes.Buffer
-	var keys []string = make([]string, 0, len(entry.Data))
+	prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller())
+
+	keys := make([]string, 0, len(entry.Data))
 	for k := range entry.Data {
 		keys = append(keys, k)
 	}

-	if !f.DisableSorting {
-		sort.Strings(keys)
+	fixedKeys := make([]string, 0, 4+len(entry.Data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
 	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		fixedKeys = append(fixedKeys,
+			f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+	}
+
+	var b *bytes.Buffer
 	if entry.Buffer != nil {
 		b = entry.Buffer
 	} else {
 		b = &bytes.Buffer{}
 	}

-	prefixFieldClashes(entry.Data)
-
-	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
-	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+	f.terminalInitOnce.Do(func() { f.init(entry) })

 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
-		timestampFormat = DefaultTimestampFormat
+		timestampFormat = defaultTimestampFormat
 	}
-	if isColored {
+	if f.isColored() {
 		f.printColored(b, entry, keys, timestampFormat)
 	} else {
-		if !f.DisableTimestamp {
-			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
-		}
-		f.appendKeyValue(b, "level", entry.Level.String())
-		if entry.Message != "" {
-			f.appendKeyValue(b, "msg", entry.Message)
-		}
-		for _, key := range keys {
-			f.appendKeyValue(b, key, entry.Data[key])
+		for _, key := range fixedKeys {
+			var value interface{}
+			switch {
+			case key == f.FieldMap.resolve(FieldKeyTime):
+				value = entry.Time.Format(timestampFormat)
+			case key == f.FieldMap.resolve(FieldKeyLevel):
+				value = entry.Level.String()
+			case key == f.FieldMap.resolve(FieldKeyMsg):
+				value = entry.Message
+			case key == f.FieldMap.resolve(FieldKeyLogrusError):
+				value = entry.err
+			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+				value = entry.Caller.Function
+			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+				value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+			default:
+				value = entry.Data[key]
+			}
+			f.appendKeyValue(b, key, value)
 		}
 	}

@@ -103,7 +191,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
 func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
 	var levelColor int
 	switch entry.Level {
-	case DebugLevel:
+	case DebugLevel, TraceLevel:
 		levelColor = gray
 	case WarnLevel:
 		levelColor = yellow

@@ -113,25 +201,45 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
 		levelColor = blue
 	}

-	levelText := strings.ToUpper(entry.Level.String())[0:4]
+	levelText := strings.ToUpper(entry.Level.String())
+	if !f.DisableLevelTruncation {
+		levelText = levelText[0:4]
+	}

-	if !f.FullTimestamp {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+	// Remove a single newline if it already exists in the message to keep
+	// the behavior of logrus text_formatter the same as the stdlib log package
+	entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+	caller := ""
+
+	if entry.HasCaller() {
+		caller = fmt.Sprintf("%s:%d %s()",
+			entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
+	}
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
 	} else {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
 	}
 	for _, k := range keys {
 		v := entry.Data[k]
-		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
 	}
 }

-func needsQuoting(text string) bool {
+func (f *TextFormatter) needsQuoting(text string) bool {
+	if f.QuoteEmptyFields && len(text) == 0 {
+		return true
+	}
 	for _, ch := range text {
 		if !((ch >= 'a' && ch <= 'z') ||
 			(ch >= 'A' && ch <= 'Z') ||
 			(ch >= '0' && ch <= '9') ||
-			ch == '-' || ch == '.') {
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
 			return true
 		}
 	}

@@ -139,27 +247,23 @@ func needsQuoting(text string) bool {
 }

 func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
 	b.WriteString(key)
 	b.WriteByte('=')
-
-	switch value := value.(type) {
-	case string:
-		if !needsQuoting(value) {
-			b.WriteString(value)
-		} else {
-			fmt.Fprintf(b, "%q", value)
-		}
-	case error:
-		errmsg := value.Error()
-		if !needsQuoting(errmsg) {
-			b.WriteString(errmsg)
-		} else {
-			fmt.Fprintf(b, "%q", value)
-		}
-	default:
-		fmt.Fprint(b, value)
-	}
-
-	b.WriteByte(' ')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
 }
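A sketch of how the new `TextFormatter` options added above might be combined by a caller; the reverse sort is only there to show the `SortingFunc` hook:

```go
package main

import (
	"sort"

	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		FullTimestamp: true,
		// Honor the CLICOLOR / CLICOLOR_FORCE environment variables.
		EnvironmentOverrideColors: true,
		// Keep the full level name instead of the 4-character prefix.
		DisableLevelTruncation: true,
		// Quote empty values so "key=" is unambiguous in the output.
		QuoteEmptyFields: true,
		// Sort data keys in reverse order, just to demonstrate the hook.
		SortingFunc: func(keys []string) {
			sort.Sort(sort.Reverse(sort.StringSlice(keys)))
		},
	})

	log.WithField("path", "secret/app").Warn("secret not found")
}
```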
vendor/github.com/Sirupsen/logrus/writer.go (generated, vendored, 31 changed lines)

@@ -11,39 +11,50 @@ func (logger *Logger) Writer() *io.PipeWriter {
 }

 func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
 	reader, writer := io.Pipe()

 	var printFunc func(args ...interface{})
+
 	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
 	case DebugLevel:
-		printFunc = logger.Debug
+		printFunc = entry.Debug
 	case InfoLevel:
-		printFunc = logger.Info
+		printFunc = entry.Info
 	case WarnLevel:
-		printFunc = logger.Warn
+		printFunc = entry.Warn
 	case ErrorLevel:
-		printFunc = logger.Error
+		printFunc = entry.Error
 	case FatalLevel:
-		printFunc = logger.Fatal
+		printFunc = entry.Fatal
 	case PanicLevel:
-		printFunc = logger.Panic
+		printFunc = entry.Panic
 	default:
-		printFunc = logger.Print
+		printFunc = entry.Print
 	}

-	go logger.writerScanner(reader, printFunc)
+	go entry.writerScanner(reader, printFunc)
 	runtime.SetFinalizer(writer, writerFinalizer)

 	return writer
 }

-func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
 	scanner := bufio.NewScanner(reader)
 	for scanner.Scan() {
 		printFunc(scanner.Text())
 	}
 	if err := scanner.Err(); err != nil {
-		logger.Errorf("Error while reading from Writer: %s", err)
+		entry.Errorf("Error while reading from Writer: %s", err)
 	}
 	reader.Close()
 }
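A sketch of routing a child process's stderr through the entry-based pipe writer introduced above; the command name is illustrative only:

```go
package main

import (
	"os/exec"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Everything the child writes to stderr is logged line by line
	// at Warn level, annotated with a field from the parent entry.
	w := log.WithField("child", "vault").WriterLevel(log.WarnLevel)
	defer w.Close()

	cmd := exec.Command("vault", "status")
	cmd.Stderr = w
	if err := cmd.Run(); err != nil {
		log.WithError(err).Error("child process failed")
	}
}
```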
vendor/github.com/fatih/structs/.gitignore (generated, vendored, deleted, 23 lines)

@@ -1,23 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
vendor/github.com/fatih/structs/.travis.yml (generated, vendored, deleted, 11 lines)

@@ -1,11 +0,0 @@
-language: go
-go:
-  - 1.6
-  - tip
-sudo: false
-before_install:
-- go get github.com/axw/gocov/gocov
-- go get github.com/mattn/goveralls
-- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-script:
-- $HOME/gopath/bin/goveralls -service=travis-ci
vendor/github.com/fatih/structs/README.md (generated, vendored, deleted, 163 lines)

@@ -1,163 +0,0 @@
The entire vendored README of github.com/fatih/structs is removed. It covered:
- the package description (utilities for working with Go structs, built on the reflect package, originally written to convert a struct into a `map[string]interface{}`);
- installation via `go get github.com/fatih/structs`;
- usage examples for the package-level helpers structs.Map, structs.Values, structs.Names, structs.Fields, structs.Name, structs.HasZero, structs.IsZero and structs.IsStruct on an example `Server` struct;
- the `*structs.Struct` methods (Map, Values, Fields, Names, Field, FieldOk, Name, HasZero, IsZero);
- the `*Field` methods (Tag, Value, Set, Kind, IsExported, IsZero, IsEmbedded), nested-struct access and iteration over all fields;
- credits (Fatih Arslan, Cihangir Savas) and the MIT license note.
132
vendor/github.com/fatih/structs/field.go
generated
vendored
132
vendor/github.com/fatih/structs/field.go
generated
vendored
|
@ -1,132 +0,0 @@
|
||||||
package structs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errNotExported = errors.New("field is not exported")
|
|
||||||
errNotSettable = errors.New("field is not settable")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Field represents a single struct field that encapsulates high level
|
|
||||||
// functions around the field.
|
|
||||||
type Field struct {
|
|
||||||
value reflect.Value
|
|
||||||
field reflect.StructField
|
|
||||||
defaultTag string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tag returns the value associated with key in the tag string. If there is no
|
|
||||||
// such key in the tag, Tag returns the empty string.
|
|
||||||
func (f *Field) Tag(key string) string {
|
|
||||||
return f.field.Tag.Get(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the underlying value of the field. It panics if the field
|
|
||||||
// is not exported.
|
|
||||||
func (f *Field) Value() interface{} {
|
|
||||||
return f.value.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsEmbedded returns true if the given field is an anonymous field (embedded)
|
|
||||||
func (f *Field) IsEmbedded() bool {
|
|
||||||
return f.field.Anonymous
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsExported returns true if the given field is exported.
|
|
||||||
func (f *Field) IsExported() bool {
|
|
||||||
return f.field.PkgPath == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsZero returns true if the given field is not initialized (has a zero value).
|
|
||||||
// It panics if the field is not exported.
|
|
||||||
func (f *Field) IsZero() bool {
|
|
||||||
zero := reflect.Zero(f.value.Type()).Interface()
|
|
||||||
current := f.Value()
|
|
||||||
|
|
||||||
return reflect.DeepEqual(current, zero)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the given field
|
|
||||||
func (f *Field) Name() string {
|
|
||||||
return f.field.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Kind returns the field's kind, such as "string", "map", "bool", etc.
|
|
||||||
func (f *Field) Kind() reflect.Kind {
|
|
||||||
return f.value.Kind()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the field to given value v. It returns an error if the field is not
|
|
||||||
// settable (not addressable or not exported) or if the given value's type
|
|
||||||
// doesn't match the field's type.
|
|
||||||
func (f *Field) Set(val interface{}) error {
|
|
||||||
// we can't set unexported fields, so be sure this field is exported
|
|
||||||
if !f.IsExported() {
|
|
||||||
return errNotExported
|
|
||||||
}
|
|
||||||
|
|
||||||
// do we get here? not sure...
|
|
||||||
if !f.value.CanSet() {
|
|
||||||
return errNotSettable
|
|
||||||
}
|
|
||||||
|
|
||||||
given := reflect.ValueOf(val)
|
|
||||||
|
|
||||||
if f.value.Kind() != given.Kind() {
|
|
||||||
return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
f.value.Set(given)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zero sets the field to its zero value. It returns an error if the field is not
|
|
||||||
// settable (not addressable or not exported).
|
|
||||||
func (f *Field) Zero() error {
|
|
||||||
zero := reflect.Zero(f.value.Type()).Interface()
|
|
||||||
return f.Set(zero)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields returns a slice of Fields. This is particularly handy to get the fields
|
|
||||||
// of a nested struct. A struct tag with the content of "-" ignores the
|
|
||||||
// checking of that particular field. Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field *http.Request `structs:"-"`
|
|
||||||
//
|
|
||||||
// It panics if the field is not exported or if the field's kind is not struct.
|
|
||||||
func (f *Field) Fields() []*Field {
|
|
||||||
return getFields(f.value, f.defaultTag)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field returns the field from a nested struct. It panics if the nested struct
|
|
||||||
// is not exported or if the field was not found.
|
|
||||||
func (f *Field) Field(name string) *Field {
|
|
||||||
field, ok := f.FieldOk(name)
|
|
||||||
if !ok {
|
|
||||||
panic("field not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
return field
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldOk returns the field from a nested struct. The boolean returns whether
|
|
||||||
// the field was found (true) or not (false).
|
|
||||||
func (f *Field) FieldOk(name string) (*Field, bool) {
|
|
||||||
v := strctVal(f.value.Interface())
|
|
||||||
t := v.Type()
|
|
||||||
|
|
||||||
field, ok := t.FieldByName(name)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Field{
|
|
||||||
field: field,
|
|
||||||
value: v.FieldByName(name),
|
|
||||||
}, true
|
|
||||||
}
|
|
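Since field.go above defines the whole Field-level API (Tag, Value, Set, Zero, Field, FieldOk, and the Is* predicates), a minimal sketch of that API in use may help. The `Config` type below is invented purely for illustration; only the `structs` calls come from the file above.

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

// Config is a hypothetical struct used only to exercise the Field methods
// defined in field.go.
type Config struct {
	Host string
	Port int
}

func main() {
	c := &Config{Host: "localhost", Port: 8200}
	s := structs.New(c)

	host := s.Field("Host")
	fmt.Println(host.Kind(), host.Value()) // string localhost

	// Set only works on settable (addressable, exported) fields; because we
	// passed a pointer, this succeeds and mutates c.
	if err := host.Set("example.com"); err != nil {
		fmt.Println("set failed:", err)
	}

	// Zero resets the field to its zero value ("" for a string).
	if err := host.Zero(); err != nil {
		fmt.Println("zero failed:", err)
	}
	fmt.Println(c.Host == "", host.IsZero()) // true true

	// FieldOk avoids the panic that Field triggers for unknown names.
	if _, ok := s.FieldOk("Missing"); !ok {
		fmt.Println("no such field")
	}
}
```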
507 vendor/github.com/fatih/structs/structs.go generated vendored
@@ -1,507 +0,0 @@
// Package structs contains various utility functions to work with structs.
|
|
||||||
package structs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DefaultTagName is the default tag name for struct fields which provides
|
|
||||||
// a more granular way to tweak certain structs. Look up the necessary functions
|
|
||||||
// for more info.
|
|
||||||
DefaultTagName = "structs" // struct's field default tag name
|
|
||||||
)
|
|
||||||
|
|
||||||
// Struct encapsulates a struct type to provide several high level functions
|
|
||||||
// around the struct.
|
|
||||||
type Struct struct {
|
|
||||||
raw interface{}
|
|
||||||
value reflect.Value
|
|
||||||
TagName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns a new *Struct with the struct s. It panics if the s's kind is
|
|
||||||
// not struct.
|
|
||||||
func New(s interface{}) *Struct {
|
|
||||||
return &Struct{
|
|
||||||
raw: s,
|
|
||||||
value: strctVal(s),
|
|
||||||
TagName: DefaultTagName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map converts the given struct to a map[string]interface{}, where the keys
|
|
||||||
// of the map are the field names and the values of the map the associated
|
|
||||||
// values of the fields. The default key string is the struct field name but
|
|
||||||
// can be changed in the struct field's tag value. The "structs" key in the
|
|
||||||
// struct's field tag value is the key name. Example:
|
|
||||||
//
|
|
||||||
// // Field appears in map as key "myName".
|
|
||||||
// Name string `structs:"myName"`
|
|
||||||
//
|
|
||||||
// A tag value with the content of "-" ignores that particular field. Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field bool `structs:"-"`
|
|
||||||
//
|
|
||||||
// A tag value with the content of "string" uses the stringer to get the value. Example:
|
|
||||||
//
|
|
||||||
// // The value will be output of Animal's String() func.
|
|
||||||
// // Map will panic if Animal does not implement String().
|
|
||||||
// Field *Animal `structs:"field,string"`
|
|
||||||
//
|
|
||||||
// A tag value with the option of "flatten" used in a struct field is to flatten its fields
|
|
||||||
// in the output map. Example:
|
|
||||||
//
|
|
||||||
// // The FieldStruct's fields will be flattened into the output map.
|
|
||||||
// FieldStruct time.Time `structs:"flatten"`
|
|
||||||
//
|
|
||||||
// A tag value with the option of "omitnested" stops iterating further if the type
|
|
||||||
// is a struct. Example:
|
|
||||||
//
|
|
||||||
// // Field is not processed further by this package.
|
|
||||||
// Field time.Time `structs:"myName,omitnested"`
|
|
||||||
// Field *http.Request `structs:",omitnested"`
|
|
||||||
//
|
|
||||||
// A tag value with the option of "omitempty" ignores that particular field if
|
|
||||||
// the field value is empty. Example:
|
|
||||||
//
|
|
||||||
// // Field appears in map as key "myName", but the field is
|
|
||||||
// // skipped if empty.
|
|
||||||
// Field string `structs:"myName,omitempty"`
|
|
||||||
//
|
|
||||||
// // Field appears in map as key "Field" (the default), but
|
|
||||||
// // the field is skipped if empty.
|
|
||||||
// Field string `structs:",omitempty"`
|
|
||||||
//
|
|
||||||
// Note that only exported fields of a struct can be accessed, non exported
|
|
||||||
// fields will be neglected.
|
|
||||||
func (s *Struct) Map() map[string]interface{} {
|
|
||||||
out := make(map[string]interface{})
|
|
||||||
s.FillMap(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// FillMap is the same as Map. Instead of returning the output, it fills the
|
|
||||||
// given map.
|
|
||||||
func (s *Struct) FillMap(out map[string]interface{}) {
|
|
||||||
if out == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := s.structFields()
|
|
||||||
|
|
||||||
for _, field := range fields {
|
|
||||||
name := field.Name
|
|
||||||
val := s.value.FieldByName(name)
|
|
||||||
isSubStruct := false
|
|
||||||
var finalVal interface{}
|
|
||||||
|
|
||||||
tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
|
||||||
if tagName != "" {
|
|
||||||
name = tagName
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the value is a zero value and the field is marked as omitempty do
|
|
||||||
// not include
|
|
||||||
if tagOpts.Has("omitempty") {
|
|
||||||
zero := reflect.Zero(val.Type()).Interface()
|
|
||||||
current := val.Interface()
|
|
||||||
|
|
||||||
if reflect.DeepEqual(current, zero) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
|
||||||
// look out for embedded structs, and convert them to a
|
|
||||||
// map[string]interface{} too
|
|
||||||
n := New(val.Interface())
|
|
||||||
n.TagName = s.TagName
|
|
||||||
m := n.Map()
|
|
||||||
isSubStruct = true
|
|
||||||
if len(m) == 0 {
|
|
||||||
finalVal = val.Interface()
|
|
||||||
} else {
|
|
||||||
finalVal = m
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
finalVal = val.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
if tagOpts.Has("string") {
|
|
||||||
s, ok := val.Interface().(fmt.Stringer)
|
|
||||||
if ok {
|
|
||||||
out[name] = s.String()
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if isSubStruct && (tagOpts.Has("flatten")) {
|
|
||||||
for k := range finalVal.(map[string]interface{}) {
|
|
||||||
out[k] = finalVal.(map[string]interface{})[k]
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
out[name] = finalVal
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values converts the given s struct's field values to a []interface{}. A
|
|
||||||
// struct tag with the content of "-" ignores the that particular field.
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field int `structs:"-"`
|
|
||||||
//
|
|
||||||
// A value with the option of "omitnested" stops iterating further if the type
|
|
||||||
// is a struct. Example:
|
|
||||||
//
|
|
||||||
// // Field is not processed further by this package.
|
|
||||||
// Field time.Time `structs:",omitnested"`
|
|
||||||
// Field *http.Request `structs:",omitnested"`
|
|
||||||
//
|
|
||||||
// A tag value with the option of "omitempty" ignores that particular field and
|
|
||||||
// is not added to the values if the field value is empty. Example:
|
|
||||||
//
|
|
||||||
// // Field is skipped if empty
|
|
||||||
// Field string `structs:",omitempty"`
|
|
||||||
//
|
|
||||||
// Note that only exported fields of a struct can be accessed, non exported
|
|
||||||
// fields will be neglected.
|
|
||||||
func (s *Struct) Values() []interface{} {
|
|
||||||
fields := s.structFields()
|
|
||||||
|
|
||||||
var t []interface{}
|
|
||||||
|
|
||||||
for _, field := range fields {
|
|
||||||
val := s.value.FieldByName(field.Name)
|
|
||||||
|
|
||||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
|
||||||
|
|
||||||
// if the value is a zero value and the field is marked as omitempty do
|
|
||||||
// not include
|
|
||||||
if tagOpts.Has("omitempty") {
|
|
||||||
zero := reflect.Zero(val.Type()).Interface()
|
|
||||||
current := val.Interface()
|
|
||||||
|
|
||||||
if reflect.DeepEqual(current, zero) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tagOpts.Has("string") {
|
|
||||||
s, ok := val.Interface().(fmt.Stringer)
|
|
||||||
if ok {
|
|
||||||
t = append(t, s.String())
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
|
||||||
// look out for embedded structs, and convert them to a
|
|
||||||
// []interface{} to be added to the final values slice
|
|
||||||
for _, embeddedVal := range Values(val.Interface()) {
|
|
||||||
t = append(t, embeddedVal)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t = append(t, val.Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields returns a slice of Fields. A struct tag with the content of "-"
|
|
||||||
// ignores the checking of that particular field. Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field bool `structs:"-"`
|
|
||||||
//
|
|
||||||
// It panics if s's kind is not struct.
|
|
||||||
func (s *Struct) Fields() []*Field {
|
|
||||||
return getFields(s.value, s.TagName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Names returns a slice of field names. A struct tag with the content of "-"
|
|
||||||
// ignores the checking of that particular field. Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field bool `structs:"-"`
|
|
||||||
//
|
|
||||||
// It panics if s's kind is not struct.
|
|
||||||
func (s *Struct) Names() []string {
|
|
||||||
fields := getFields(s.value, s.TagName)
|
|
||||||
|
|
||||||
names := make([]string, len(fields))
|
|
||||||
|
|
||||||
for i, field := range fields {
|
|
||||||
names[i] = field.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
return names
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFields(v reflect.Value, tagName string) []*Field {
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
t := v.Type()
|
|
||||||
|
|
||||||
var fields []*Field
|
|
||||||
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
field := t.Field(i)
|
|
||||||
|
|
||||||
if tag := field.Tag.Get(tagName); tag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Field{
|
|
||||||
field: field,
|
|
||||||
value: v.FieldByName(field.Name),
|
|
||||||
}
|
|
||||||
|
|
||||||
fields = append(fields, f)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field returns a new Field struct that provides several high level functions
|
|
||||||
// around a single struct field entity. It panics if the field is not found.
|
|
||||||
func (s *Struct) Field(name string) *Field {
|
|
||||||
f, ok := s.FieldOk(name)
|
|
||||||
if !ok {
|
|
||||||
panic("field not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldOk returns a new Field struct that provides several high level functions
|
|
||||||
// around a single struct field entity. The boolean returns true if the field
|
|
||||||
// was found.
|
|
||||||
func (s *Struct) FieldOk(name string) (*Field, bool) {
|
|
||||||
t := s.value.Type()
|
|
||||||
|
|
||||||
field, ok := t.FieldByName(name)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Field{
|
|
||||||
field: field,
|
|
||||||
value: s.value.FieldByName(name),
|
|
||||||
defaultTag: s.TagName,
|
|
||||||
}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsZero returns true if all fields in a struct are a zero value (not
|
|
||||||
// initialized). A struct tag with the content of "-" ignores the checking of
|
|
||||||
// that particular field. Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field bool `structs:"-"`
|
|
||||||
//
|
|
||||||
// A value with the option of "omitnested" stops iterating further if the type
|
|
||||||
// is a struct. Example:
|
|
||||||
//
|
|
||||||
// // Field is not processed further by this package.
|
|
||||||
// Field time.Time `structs:"myName,omitnested"`
|
|
||||||
// Field *http.Request `structs:",omitnested"`
|
|
||||||
//
|
|
||||||
// Note that only exported fields of a struct can be accessed, non exported
|
|
||||||
// fields will be neglected. It panics if s's kind is not struct.
|
|
||||||
func (s *Struct) IsZero() bool {
|
|
||||||
fields := s.structFields()
|
|
||||||
|
|
||||||
for _, field := range fields {
|
|
||||||
val := s.value.FieldByName(field.Name)
|
|
||||||
|
|
||||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
|
||||||
|
|
||||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
|
||||||
ok := IsZero(val.Interface())
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// zero value of the given field, such as "" for string, 0 for int
|
|
||||||
zero := reflect.Zero(val.Type()).Interface()
|
|
||||||
|
|
||||||
// current value of the given field
|
|
||||||
current := val.Interface()
|
|
||||||
|
|
||||||
if !reflect.DeepEqual(current, zero) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasZero returns true if a field in a struct is not initialized (zero value).
|
|
||||||
// A struct tag with the content of "-" ignores the checking of that particular
|
|
||||||
// field. Example:
|
|
||||||
//
|
|
||||||
// // Field is ignored by this package.
|
|
||||||
// Field bool `structs:"-"`
|
|
||||||
//
|
|
||||||
// A value with the option of "omitnested" stops iterating further if the type
|
|
||||||
// is a struct. Example:
|
|
||||||
//
|
|
||||||
// // Field is not processed further by this package.
|
|
||||||
// Field time.Time `structs:"myName,omitnested"`
|
|
||||||
// Field *http.Request `structs:",omitnested"`
|
|
||||||
//
|
|
||||||
// Note that only exported fields of a struct can be accessed, non exported
|
|
||||||
// fields will be neglected. It panics if s's kind is not struct.
|
|
||||||
func (s *Struct) HasZero() bool {
|
|
||||||
fields := s.structFields()
|
|
||||||
|
|
||||||
for _, field := range fields {
|
|
||||||
val := s.value.FieldByName(field.Name)
|
|
||||||
|
|
||||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
|
||||||
|
|
||||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
|
||||||
ok := HasZero(val.Interface())
|
|
||||||
if ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// zero value of the given field, such as "" for string, 0 for int
|
|
||||||
zero := reflect.Zero(val.Type()).Interface()
|
|
||||||
|
|
||||||
// current value of the given field
|
|
||||||
current := val.Interface()
|
|
||||||
|
|
||||||
if reflect.DeepEqual(current, zero) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the struct's type name within its package. For more info refer
|
|
||||||
// to the Name() function.
|
|
||||||
func (s *Struct) Name() string {
|
|
||||||
return s.value.Type().Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
// structFields returns the exported struct fields for a given s struct. This
|
|
||||||
// is a convenient helper method to avoid duplicate code in some of the
|
|
||||||
// functions.
|
|
||||||
func (s *Struct) structFields() []reflect.StructField {
|
|
||||||
t := s.value.Type()
|
|
||||||
|
|
||||||
var f []reflect.StructField
|
|
||||||
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
field := t.Field(i)
|
|
||||||
// we can't access the value of unexported fields
|
|
||||||
if field.PkgPath != "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// don't check if it's omitted
|
|
||||||
if tag := field.Tag.Get(s.TagName); tag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
f = append(f, field)
|
|
||||||
}
|
|
||||||
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
func strctVal(s interface{}) reflect.Value {
|
|
||||||
v := reflect.ValueOf(s)
|
|
||||||
|
|
||||||
// if it's a pointer, get the underlying element
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Kind() != reflect.Struct {
|
|
||||||
panic("not struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map converts the given struct to a map[string]interface{}. For more info
|
|
||||||
// refer to Struct types Map() method. It panics if s's kind is not struct.
|
|
||||||
func Map(s interface{}) map[string]interface{} {
|
|
||||||
return New(s).Map()
|
|
||||||
}
|
|
||||||
|
|
||||||
// FillMap is the same as Map. Instead of returning the output, it fills the
|
|
||||||
// given map.
|
|
||||||
func FillMap(s interface{}, out map[string]interface{}) {
|
|
||||||
New(s).FillMap(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values converts the given struct to a []interface{}. For more info refer to
|
|
||||||
// Struct types Values() method. It panics if s's kind is not struct.
|
|
||||||
func Values(s interface{}) []interface{} {
|
|
||||||
return New(s).Values()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields returns a slice of *Field. For more info refer to Struct types
|
|
||||||
// Fields() method. It panics if s's kind is not struct.
|
|
||||||
func Fields(s interface{}) []*Field {
|
|
||||||
return New(s).Fields()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Names returns a slice of field names. For more info refer to Struct types
|
|
||||||
// Names() method. It panics if s's kind is not struct.
|
|
||||||
func Names(s interface{}) []string {
|
|
||||||
return New(s).Names()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsZero returns true if all fields are equal to a zero value. For more info
|
|
||||||
// refer to Struct types IsZero() method. It panics if s's kind is not struct.
|
|
||||||
func IsZero(s interface{}) bool {
|
|
||||||
return New(s).IsZero()
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasZero returns true if any field is equal to a zero value. For more info
|
|
||||||
// refer to Struct types HasZero() method. It panics if s's kind is not struct.
|
|
||||||
func HasZero(s interface{}) bool {
|
|
||||||
return New(s).HasZero()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsStruct returns true if the given variable is a struct or a pointer to
|
|
||||||
// struct.
|
|
||||||
func IsStruct(s interface{}) bool {
|
|
||||||
v := reflect.ValueOf(s)
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// uninitialized zero value of a struct
|
|
||||||
if v.Kind() == reflect.Invalid {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return v.Kind() == reflect.Struct
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the struct's type name within its package. It returns an
|
|
||||||
// empty string for unnamed types. It panics if s's kind is not struct.
|
|
||||||
func Name(s interface{}) string {
|
|
||||||
return New(s).Name()
|
|
||||||
}
|
|
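The long doc comment on Map() above enumerates the supported tag options ("-", "omitempty", "omitnested", "string", "flatten"). As a hedged illustration only, the sketch below shows how those options shape the resulting map; the `Example` and `Nested` types are invented for this example and are not part of the library.

```go
package main

import (
	"fmt"
	"time"

	"github.com/fatih/structs"
)

// Hypothetical types chosen to exercise the tag options documented on Map().
type Nested struct {
	A string
	B int
}

type Example struct {
	Name   string        `structs:"myName"`
	Secret string        `structs:"-"`               // dropped entirely
	Note   string        `structs:",omitempty"`      // dropped when empty
	When   time.Time     `structs:"when,omitnested"` // kept as a time.Time, not recursed into
	Took   time.Duration `structs:"took,string"`     // rendered via its String() method
	Flat   Nested        `structs:"flat,flatten"`    // fields hoisted into the parent map
}

func main() {
	e := Example{
		Name: "demo",
		When: time.Date(2017, 5, 1, 0, 0, 0, 0, time.UTC),
		Took: 3 * time.Second,
		Flat: Nested{A: "a", B: 2},
	}

	m := structs.Map(e)
	fmt.Println(m["myName"])    // demo
	fmt.Println(m["took"])      // 3s
	fmt.Println(m["A"], m["B"]) // a 2 (flattened out of Flat)
	_, hasNote := m["Note"]
	fmt.Println(hasNote) // false, omitempty dropped the empty field
}
```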
32 vendor/github.com/fatih/structs/tags.go generated vendored
@@ -1,32 +0,0 @@
package structs
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// tagOptions contains a slice of tag options
|
|
||||||
type tagOptions []string
|
|
||||||
|
|
||||||
// Has returns true if the given option is available in tagOptions
|
|
||||||
func (t tagOptions) Has(opt string) bool {
|
|
||||||
for _, tagOpt := range t {
|
|
||||||
if tagOpt == opt {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTag splits a struct field's tag into its name and a list of options
|
|
||||||
// which comes after a name. A tag is in the form of: "name,option1,option2".
|
|
||||||
// The name can be neglected.
|
|
||||||
func parseTag(tag string) (string, tagOptions) {
|
|
||||||
// tag is one of followings:
|
|
||||||
// ""
|
|
||||||
// "name"
|
|
||||||
// "name,opt"
|
|
||||||
// "name,opt,opt2"
|
|
||||||
// ",opt"
|
|
||||||
|
|
||||||
res := strings.Split(tag, ",")
|
|
||||||
return res[0], res[1:]
|
|
||||||
}
|
|
15 vendor/github.com/golang/snappy/AUTHORS generated vendored Normal file
@@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
|
||||||
|
# This file is distinct from the CONTRIBUTORS files.
|
||||||
|
# See the latter for an explanation.
|
||||||
|
|
||||||
|
# Names should be added to this file as
|
||||||
|
# Name or Organization <email address>
|
||||||
|
# The email address is not required for organizations.
|
||||||
|
|
||||||
|
# Please keep the list sorted.
|
||||||
|
|
||||||
|
Damian Gryski <dgryski@gmail.com>
|
||||||
|
Google Inc.
|
||||||
|
Jan Mercl <0xjnml@gmail.com>
|
||||||
|
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||||
|
Sebastien Binet <seb.binet@gmail.com>
|
37 vendor/github.com/golang/snappy/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,37 @@
# This is the official list of people who can contribute
|
||||||
|
# (and typically have contributed) code to the Snappy-Go repository.
|
||||||
|
# The AUTHORS file lists the copyright holders; this file
|
||||||
|
# lists people. For example, Google employees are listed here
|
||||||
|
# but not in AUTHORS, because Google holds the copyright.
|
||||||
|
#
|
||||||
|
# The submission process automatically checks to make sure
|
||||||
|
# that people submitting code are listed in this file (by email address).
|
||||||
|
#
|
||||||
|
# Names should be added to this file only after verifying that
|
||||||
|
# the individual or the individual's organization has agreed to
|
||||||
|
# the appropriate Contributor License Agreement, found here:
|
||||||
|
#
|
||||||
|
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||||
|
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||||
|
#
|
||||||
|
# The agreement for individuals can be filled out on the web.
|
||||||
|
#
|
||||||
|
# When adding J Random Contributor's name to this file,
|
||||||
|
# either J's name or J's organization's name should be
|
||||||
|
# added to the AUTHORS file, depending on whether the
|
||||||
|
# individual or corporate CLA was used.
|
||||||
|
|
||||||
|
# Names should be added to this file like so:
|
||||||
|
# Name <email address>
|
||||||
|
|
||||||
|
# Please keep the list sorted.
|
||||||
|
|
||||||
|
Damian Gryski <dgryski@gmail.com>
|
||||||
|
Jan Mercl <0xjnml@gmail.com>
|
||||||
|
Kai Backman <kaib@golang.org>
|
||||||
|
Marc-Antoine Ruel <maruel@chromium.org>
|
||||||
|
Nigel Tao <nigeltao@golang.org>
|
||||||
|
Rob Pike <r@golang.org>
|
||||||
|
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||||
|
Russ Cox <rsc@golang.org>
|
||||||
|
Sebastien Binet <seb.binet@gmail.com>
|
27 vendor/github.com/golang/snappy/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
237 vendor/github.com/golang/snappy/decode.go generated vendored Normal file
@@ -0,0 +1,237 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrCorrupt reports that the input is invalid.
|
||||||
|
ErrCorrupt = errors.New("snappy: corrupt input")
|
||||||
|
// ErrTooLarge reports that the uncompressed length is too large.
|
||||||
|
ErrTooLarge = errors.New("snappy: decoded block is too large")
|
||||||
|
// ErrUnsupported reports that the input isn't supported.
|
||||||
|
ErrUnsupported = errors.New("snappy: unsupported input")
|
||||||
|
|
||||||
|
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
|
||||||
|
)
|
||||||
|
|
||||||
|
// DecodedLen returns the length of the decoded block.
|
||||||
|
func DecodedLen(src []byte) (int, error) {
|
||||||
|
v, _, err := decodedLen(src)
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodedLen returns the length of the decoded block and the number of bytes
|
||||||
|
// that the length header occupied.
|
||||||
|
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||||
|
v, n := binary.Uvarint(src)
|
||||||
|
if n <= 0 || v > 0xffffffff {
|
||||||
|
return 0, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
const wordSize = 32 << (^uint(0) >> 32 & 1)
|
||||||
|
if wordSize == 32 && v > 0x7fffffff {
|
||||||
|
return 0, 0, ErrTooLarge
|
||||||
|
}
|
||||||
|
return int(v), n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
decodeErrCodeCorrupt = 1
|
||||||
|
decodeErrCodeUnsupportedLiteralLength = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
func Decode(dst, src []byte) ([]byte, error) {
|
||||||
|
dLen, s, err := decodedLen(src)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if dLen <= len(dst) {
|
||||||
|
dst = dst[:dLen]
|
||||||
|
} else {
|
||||||
|
dst = make([]byte, dLen)
|
||||||
|
}
|
||||||
|
switch decode(dst, src[s:]) {
|
||||||
|
case 0:
|
||||||
|
return dst, nil
|
||||||
|
case decodeErrCodeUnsupportedLiteralLength:
|
||||||
|
return nil, errUnsupportedLiteralLength
|
||||||
|
}
|
||||||
|
return nil, ErrCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||||
|
// format described at
|
||||||
|
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||||
|
func NewReader(r io.Reader) *Reader {
|
||||||
|
return &Reader{
|
||||||
|
r: r,
|
||||||
|
decoded: make([]byte, maxBlockSize),
|
||||||
|
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||||
|
type Reader struct {
|
||||||
|
r io.Reader
|
||||||
|
err error
|
||||||
|
decoded []byte
|
||||||
|
buf []byte
|
||||||
|
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||||
|
i, j int
|
||||||
|
readHeader bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||||
|
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||||
|
// a new one.
|
||||||
|
func (r *Reader) Reset(reader io.Reader) {
|
||||||
|
r.r = reader
|
||||||
|
r.err = nil
|
||||||
|
r.i = 0
|
||||||
|
r.j = 0
|
||||||
|
r.readHeader = false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
|
||||||
|
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||||
|
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read satisfies the io.Reader interface.
|
||||||
|
func (r *Reader) Read(p []byte) (int, error) {
|
||||||
|
if r.err != nil {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
if r.i < r.j {
|
||||||
|
n := copy(p, r.decoded[r.i:r.j])
|
||||||
|
r.i += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
if !r.readFull(r.buf[:4], true) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
chunkType := r.buf[0]
|
||||||
|
if !r.readHeader {
|
||||||
|
if chunkType != chunkTypeStreamIdentifier {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
r.readHeader = true
|
||||||
|
}
|
||||||
|
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||||
|
if chunkLen > len(r.buf) {
|
||||||
|
r.err = ErrUnsupported
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// The chunk types are specified at
|
||||||
|
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||||
|
switch chunkType {
|
||||||
|
case chunkTypeCompressedData:
|
||||||
|
// Section 4.2. Compressed data (chunk type 0x00).
|
||||||
|
if chunkLen < checksumSize {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
buf := r.buf[:chunkLen]
|
||||||
|
if !r.readFull(buf, false) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||||
|
buf = buf[checksumSize:]
|
||||||
|
|
||||||
|
n, err := DecodedLen(buf)
|
||||||
|
if err != nil {
|
||||||
|
r.err = err
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if n > len(r.decoded) {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if _, err := Decode(r.decoded, buf); err != nil {
|
||||||
|
r.err = err
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if crc(r.decoded[:n]) != checksum {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
r.i, r.j = 0, n
|
||||||
|
continue
|
||||||
|
|
||||||
|
case chunkTypeUncompressedData:
|
||||||
|
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||||
|
if chunkLen < checksumSize {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
buf := r.buf[:checksumSize]
|
||||||
|
if !r.readFull(buf, false) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||||
|
// Read directly into r.decoded instead of via r.buf.
|
||||||
|
n := chunkLen - checksumSize
|
||||||
|
if n > len(r.decoded) {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if !r.readFull(r.decoded[:n], false) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if crc(r.decoded[:n]) != checksum {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
r.i, r.j = 0, n
|
||||||
|
continue
|
||||||
|
|
||||||
|
case chunkTypeStreamIdentifier:
|
||||||
|
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||||
|
if chunkLen != len(magicBody) {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
for i := 0; i < len(magicBody); i++ {
|
||||||
|
if r.buf[i] != magicBody[i] {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunkType <= 0x7f {
|
||||||
|
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||||
|
r.err = ErrUnsupported
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
// Section 4.4 Padding (chunk type 0xfe).
|
||||||
|
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||||
|
if !r.readFull(r.buf[:chunkLen], false) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
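Decode above, together with Encode (added later in this diff), forms snappy's block-format API. A minimal roundtrip sketch, assuming the vendored package is importable under its upstream path github.com/golang/snappy:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("hello snappy "), 20)

	// Encode may reuse dst if it is large enough; passing nil lets it allocate.
	enc := snappy.Encode(nil, src)

	// Decode reverses the block encoding; it returns an error on corrupt input.
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(enc) < len(src), bytes.Equal(dec, src)) // true true
}
```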
14 vendor/github.com/golang/snappy/decode_amd64.go generated vendored Normal file
@@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !noasm
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
// decode has the same semantics as in decode_other.go.
|
||||||
|
//
|
||||||
|
//go:noescape
|
||||||
|
func decode(dst, src []byte) int
|
490 vendor/github.com/golang/snappy/decode_amd64.s generated vendored Normal file
@@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !noasm
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||||
|
// where marked with a "!!!".
|
||||||
|
|
||||||
|
// func decode(dst, src []byte) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The non-zero stack size is only to
|
||||||
|
// spill registers and push args when issuing a CALL. The register allocation:
|
||||||
|
// - AX scratch
|
||||||
|
// - BX scratch
|
||||||
|
// - CX length or x
|
||||||
|
// - DX offset
|
||||||
|
// - SI &src[s]
|
||||||
|
// - DI &dst[d]
|
||||||
|
// + R8 dst_base
|
||||||
|
// + R9 dst_len
|
||||||
|
// + R10 dst_base + dst_len
|
||||||
|
// + R11 src_base
|
||||||
|
// + R12 src_len
|
||||||
|
// + R13 src_base + src_len
|
||||||
|
// - R14 used by doCopy
|
||||||
|
// - R15 used by doCopy
|
||||||
|
//
|
||||||
|
// The registers R8-R13 (marked with a "+") are set at the start of the
|
||||||
|
// function, and after a CALL returns, and are not otherwise modified.
|
||||||
|
//
|
||||||
|
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
|
||||||
|
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
|
||||||
|
TEXT ·decode(SB), NOSPLIT, $48-56
|
||||||
|
// Initialize SI, DI and R8-R13.
|
||||||
|
MOVQ dst_base+0(FP), R8
|
||||||
|
MOVQ dst_len+8(FP), R9
|
||||||
|
MOVQ R8, DI
|
||||||
|
MOVQ R8, R10
|
||||||
|
ADDQ R9, R10
|
||||||
|
MOVQ src_base+24(FP), R11
|
||||||
|
MOVQ src_len+32(FP), R12
|
||||||
|
MOVQ R11, SI
|
||||||
|
MOVQ R11, R13
|
||||||
|
ADDQ R12, R13
|
||||||
|
|
||||||
|
loop:
|
||||||
|
// for s < len(src)
|
||||||
|
CMPQ SI, R13
|
||||||
|
JEQ end
|
||||||
|
|
||||||
|
// CX = uint32(src[s])
|
||||||
|
//
|
||||||
|
// switch src[s] & 0x03
|
||||||
|
MOVBLZX (SI), CX
|
||||||
|
MOVL CX, BX
|
||||||
|
ANDL $3, BX
|
||||||
|
CMPL BX, $1
|
||||||
|
JAE tagCopy
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// The code below handles literal tags.
|
||||||
|
|
||||||
|
// case tagLiteral:
|
||||||
|
// x := uint32(src[s] >> 2)
|
||||||
|
// switch
|
||||||
|
SHRL $2, CX
|
||||||
|
CMPL CX, $60
|
||||||
|
JAE tagLit60Plus
|
||||||
|
|
||||||
|
// case x < 60:
|
||||||
|
// s++
|
||||||
|
INCQ SI
|
||||||
|
|
||||||
|
doLit:
|
||||||
|
// This is the end of the inner "switch", when we have a literal tag.
|
||||||
|
//
|
||||||
|
// We assume that CX == x and x fits in a uint32, where x is the variable
|
||||||
|
// used in the pure Go decode_other.go code.
|
||||||
|
|
||||||
|
// length = int(x) + 1
|
||||||
|
//
|
||||||
|
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||||
|
// CX can hold 64 bits, so the increment cannot overflow.
|
||||||
|
INCQ CX
|
||||||
|
|
||||||
|
// Prepare to check if copying length bytes will run past the end of dst or
|
||||||
|
// src.
|
||||||
|
//
|
||||||
|
// AX = len(dst) - d
|
||||||
|
// BX = len(src) - s
|
||||||
|
MOVQ R10, AX
|
||||||
|
SUBQ DI, AX
|
||||||
|
MOVQ R13, BX
|
||||||
|
SUBQ SI, BX
|
||||||
|
|
||||||
|
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||||
|
//
|
||||||
|
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||||
|
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||||
|
// against 21 instead of 16, because it cannot assume that all of its input
|
||||||
|
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||||
|
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||||
|
// contiguousness (the src argument is a []byte).
|
||||||
|
CMPQ CX, $16
|
||||||
|
JGT callMemmove
|
||||||
|
CMPQ AX, $16
|
||||||
|
JLT callMemmove
|
||||||
|
CMPQ BX, $16
|
||||||
|
JLT callMemmove
|
||||||
|
|
||||||
|
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||||
|
// (Decode's documentation says that dst and src must not overlap.)
|
||||||
|
//
|
||||||
|
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||||
|
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||||
|
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||||
|
// non-nil error), so the overrun will be ignored.
|
||||||
|
//
|
||||||
|
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||||
|
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||||
|
// effective on architectures that are fussier about alignment.
|
||||||
|
MOVOU 0(SI), X0
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
|
||||||
|
// d += length
|
||||||
|
// s += length
|
||||||
|
ADDQ CX, DI
|
||||||
|
ADDQ CX, SI
|
||||||
|
JMP loop
|
||||||
|
|
||||||
|
callMemmove:
|
||||||
|
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||||
|
CMPQ CX, AX
|
||||||
|
JGT errCorrupt
|
||||||
|
CMPQ CX, BX
|
||||||
|
JGT errCorrupt
|
||||||
|
|
||||||
|
// copy(dst[d:], src[s:s+length])
|
||||||
|
//
|
||||||
|
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||||
|
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
|
||||||
|
// three registers to the stack, to save local variables across the CALL.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ SI, 8(SP)
|
||||||
|
MOVQ CX, 16(SP)
|
||||||
|
MOVQ DI, 24(SP)
|
||||||
|
MOVQ SI, 32(SP)
|
||||||
|
MOVQ CX, 40(SP)
|
||||||
|
CALL runtime·memmove(SB)
|
||||||
|
|
||||||
|
// Restore local variables: unspill registers from the stack and
|
||||||
|
// re-calculate R8-R13.
|
||||||
|
MOVQ 24(SP), DI
|
||||||
|
MOVQ 32(SP), SI
|
||||||
|
MOVQ 40(SP), CX
|
||||||
|
MOVQ dst_base+0(FP), R8
|
||||||
|
MOVQ dst_len+8(FP), R9
|
||||||
|
MOVQ R8, R10
|
||||||
|
ADDQ R9, R10
|
||||||
|
MOVQ src_base+24(FP), R11
|
||||||
|
MOVQ src_len+32(FP), R12
|
||||||
|
MOVQ R11, R13
|
||||||
|
ADDQ R12, R13
|
||||||
|
|
||||||
|
// d += length
|
||||||
|
// s += length
|
||||||
|
ADDQ CX, DI
|
||||||
|
ADDQ CX, SI
|
||||||
|
JMP loop
|
||||||
|
|
||||||
|
tagLit60Plus:
|
||||||
|
// !!! This fragment does the
|
||||||
|
//
|
||||||
|
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||||
|
//
|
||||||
|
// checks. In the asm version, we code it once instead of once per switch case.
|
||||||
|
ADDQ CX, SI
|
||||||
|
SUBQ $58, SI
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// case x == 60:
|
||||||
|
CMPL CX, $61
|
||||||
|
JEQ tagLit61
|
||||||
|
JA tagLit62Plus
|
||||||
|
|
||||||
|
// x = uint32(src[s-1])
|
||||||
|
MOVBLZX -1(SI), CX
|
||||||
|
JMP doLit
|
||||||
|
|
||||||
|
tagLit61:
|
||||||
|
// case x == 61:
|
||||||
|
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||||
|
MOVWLZX -2(SI), CX
|
||||||
|
JMP doLit
|
||||||
|
|
||||||
|
tagLit62Plus:
|
||||||
|
CMPL CX, $62
|
||||||
|
JA tagLit63
|
||||||
|
|
||||||
|
// case x == 62:
|
||||||
|
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||||
|
MOVWLZX -3(SI), CX
|
||||||
|
MOVBLZX -1(SI), BX
|
||||||
|
SHLL $16, BX
|
||||||
|
ORL BX, CX
|
||||||
|
JMP doLit
|
||||||
|
|
||||||
|
tagLit63:
|
||||||
|
// case x == 63:
|
||||||
|
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||||
|
MOVL -4(SI), CX
|
||||||
|
JMP doLit
|
||||||
|
|
||||||
|
// The code above handles literal tags.
|
||||||
|
// ----------------------------------------
|
||||||
|
// The code below handles copy tags.
|
||||||
|
|
||||||
|
tagCopy4:
|
||||||
|
// case tagCopy4:
|
||||||
|
// s += 5
|
||||||
|
ADDQ $5, SI
|
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc }
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// length = 1 + int(src[s-5])>>2
|
||||||
|
SHRQ $2, CX
|
||||||
|
INCQ CX
|
||||||
|
|
||||||
|
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||||
|
MOVLQZX -4(SI), DX
|
||||||
|
JMP doCopy
|
||||||
|
|
||||||
|
tagCopy2:
|
||||||
|
// case tagCopy2:
|
||||||
|
// s += 3
|
||||||
|
ADDQ $3, SI
|
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc }
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// length = 1 + int(src[s-3])>>2
|
||||||
|
SHRQ $2, CX
|
||||||
|
INCQ CX
|
||||||
|
|
||||||
|
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||||
|
MOVWQZX -2(SI), DX
|
||||||
|
JMP doCopy
|
||||||
|
|
||||||
|
tagCopy:
|
||||||
|
// We have a copy tag. We assume that:
|
||||||
|
// - BX == src[s] & 0x03
|
||||||
|
// - CX == src[s]
|
||||||
|
CMPQ BX, $2
|
||||||
|
JEQ tagCopy2
|
||||||
|
JA tagCopy4
|
||||||
|
|
||||||
|
// case tagCopy1:
|
||||||
|
// s += 2
|
||||||
|
ADDQ $2, SI
|
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc }
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||||
|
MOVQ CX, DX
|
||||||
|
ANDQ $0xe0, DX
|
||||||
|
SHLQ $3, DX
|
||||||
|
MOVBQZX -1(SI), BX
|
||||||
|
ORQ BX, DX
|
||||||
|
|
||||||
|
// length = 4 + int(src[s-2])>>2&0x7
|
||||||
|
SHRQ $2, CX
|
||||||
|
ANDQ $7, CX
|
||||||
|
ADDQ $4, CX
|
||||||
|
|
||||||
|
doCopy:
|
||||||
|
// This is the end of the outer "switch", when we have a copy tag.
|
||||||
|
//
|
||||||
|
// We assume that:
|
||||||
|
// - CX == length && CX > 0
|
||||||
|
// - DX == offset
|
||||||
|
|
||||||
|
// if offset <= 0 { etc }
|
||||||
|
CMPQ DX, $0
|
||||||
|
JLE errCorrupt
|
||||||
|
|
||||||
|
// if d < offset { etc }
|
||||||
|
MOVQ DI, BX
|
||||||
|
SUBQ R8, BX
|
||||||
|
CMPQ BX, DX
|
||||||
|
JLT errCorrupt
|
||||||
|
|
||||||
|
// if length > len(dst)-d { etc }
|
||||||
|
MOVQ R10, BX
|
||||||
|
SUBQ DI, BX
|
||||||
|
CMPQ CX, BX
|
||||||
|
JGT errCorrupt
|
||||||
|
|
||||||
|
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||||
|
//
|
||||||
|
// Set:
|
||||||
|
// - R14 = len(dst)-d
|
||||||
|
// - R15 = &dst[d-offset]
|
||||||
|
MOVQ R10, R14
|
||||||
|
SUBQ DI, R14
|
||||||
|
MOVQ DI, R15
|
||||||
|
SUBQ DX, R15
|
||||||
|
|
||||||
|
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||||
|
//
|
||||||
|
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||||
|
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||||
|
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||||
|
// and not one 16-byte load/store, and the first store has to be before the
|
||||||
|
// second load, due to the overlap if offset is in the range [8, 16).
|
||||||
|
//
|
||||||
|
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||||
|
// goto slowForwardCopy
|
||||||
|
// }
|
||||||
|
// copy 16 bytes
|
||||||
|
// d += length
|
||||||
|
CMPQ CX, $16
|
||||||
|
JGT slowForwardCopy
|
||||||
|
CMPQ DX, $8
|
||||||
|
JLT slowForwardCopy
|
||||||
|
CMPQ R14, $16
|
||||||
|
JLT slowForwardCopy
|
||||||
|
MOVQ 0(R15), AX
|
||||||
|
MOVQ AX, 0(DI)
|
||||||
|
MOVQ 8(R15), BX
|
||||||
|
MOVQ BX, 8(DI)
|
||||||
|
ADDQ CX, DI
|
||||||
|
JMP loop
|
||||||
|
|
||||||
|
slowForwardCopy:
|
||||||
|
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||||
|
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||||
|
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||||
|
// of the outermost loop.
|
||||||
|
//
|
||||||
|
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||||
|
// commentary says:
|
||||||
|
//
|
||||||
|
// ----
|
||||||
|
//
|
||||||
|
// The main part of this loop is a simple copy of eight bytes at a time
|
||||||
|
// until we've copied (at least) the requested amount of bytes. However,
|
||||||
|
// if d and d-offset are less than eight bytes apart (indicating a
|
||||||
|
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||||
|
// order to get the correct results. For instance, if the buffer looks like
|
||||||
|
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||||
|
// intervals:
|
||||||
|
//
|
||||||
|
// abxxxxxxxxxxxx
|
||||||
|
// [------] d-offset
|
||||||
|
// [------] d
|
||||||
|
//
|
||||||
|
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||||
|
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||||
|
//
|
||||||
|
// ababxxxxxxxxxx
|
||||||
|
// [------] d-offset
|
||||||
|
// [------] d
|
||||||
|
//
|
||||||
|
// and repeat the exercise until the two no longer overlap.
|
||||||
|
//
|
||||||
|
// This allows us to do very well in the special case of one single byte
|
||||||
|
// repeated many times, without taking a big hit for more general cases.
|
||||||
|
//
|
||||||
|
// The worst case of extra writing past the end of the match occurs when
|
||||||
|
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||||
|
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||||
|
// position 1. Thus, ten excess bytes.
|
||||||
|
//
|
||||||
|
// ----
|
||||||
|
//
|
||||||
|
// That "10 byte overrun" worst case is confirmed by Go's
|
||||||
|
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||||
|
// and finishSlowForwardCopy algorithm.
|
||||||
|
//
|
||||||
|
// if length > len(dst)-d-10 {
|
||||||
|
// goto verySlowForwardCopy
|
||||||
|
// }
|
||||||
|
SUBQ $10, R14
|
||||||
|
CMPQ CX, R14
|
||||||
|
JGT verySlowForwardCopy
|
||||||
|
|
||||||
|
makeOffsetAtLeast8:
|
||||||
|
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||||
|
// 8-byte load/stores.
|
||||||
|
//
|
||||||
|
// for offset < 8 {
|
||||||
|
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||||
|
// length -= offset
|
||||||
|
// d += offset
|
||||||
|
// offset += offset
|
||||||
|
// // The two previous lines together means that d-offset, and therefore
|
||||||
|
// // R15, is unchanged.
|
||||||
|
// }
|
||||||
|
CMPQ DX, $8
|
||||||
|
JGE fixUpSlowForwardCopy
|
||||||
|
MOVQ (R15), BX
|
||||||
|
MOVQ BX, (DI)
|
||||||
|
SUBQ DX, CX
|
||||||
|
ADDQ DX, DI
|
||||||
|
ADDQ DX, DX
|
||||||
|
JMP makeOffsetAtLeast8
|
||||||
|
|
||||||
|
fixUpSlowForwardCopy:
|
||||||
|
// !!! Add length (which might be negative now) to d (implied by DI being
|
||||||
|
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||||
|
// top of the loop. Before we do that, though, we save DI to AX so that, if
|
||||||
|
// length is positive, copying the remaining length bytes will write to the
|
||||||
|
// right place.
|
||||||
|
MOVQ DI, AX
|
||||||
|
ADDQ CX, DI
|
||||||
|
|
||||||
|
finishSlowForwardCopy:
|
||||||
|
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||||
|
// length means that we overrun, but as above, that will be fixed up by
|
||||||
|
// subsequent iterations of the outermost loop.
|
||||||
|
CMPQ CX, $0
|
||||||
|
JLE loop
|
||||||
|
MOVQ (R15), BX
|
||||||
|
MOVQ BX, (AX)
|
||||||
|
ADDQ $8, R15
|
||||||
|
ADDQ $8, AX
|
||||||
|
SUBQ $8, CX
|
||||||
|
JMP finishSlowForwardCopy
|
||||||
|
|
||||||
|
verySlowForwardCopy:
|
||||||
|
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||||
|
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||||
|
// that length > 0. In Go syntax:
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// dst[d] = dst[d - offset]
|
||||||
|
// d++
|
||||||
|
// length--
|
||||||
|
// if length == 0 {
|
||||||
|
// break
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
MOVB (R15), BX
|
||||||
|
MOVB BX, (DI)
|
||||||
|
INCQ R15
|
||||||
|
INCQ DI
|
||||||
|
DECQ CX
|
||||||
|
JNZ verySlowForwardCopy
|
||||||
|
JMP loop
|
||||||
|
|
||||||
|
// The code above handles copy tags.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
end:
|
||||||
|
// This is the end of the "for s < len(src)".
|
||||||
|
//
|
||||||
|
// if d != len(dst) { etc }
|
||||||
|
CMPQ DI, R10
|
||||||
|
JNE errCorrupt
|
||||||
|
|
||||||
|
// return 0
|
||||||
|
MOVQ $0, ret+48(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
errCorrupt:
|
||||||
|
// return decodeErrCodeCorrupt
|
||||||
|
MOVQ $1, ret+48(FP)
|
||||||
|
RET
|
101 vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file
@@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !amd64 appengine !gc noasm
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
// decode writes the decoding of src to dst. It assumes that the varint-encoded
|
||||||
|
// length of the decompressed bytes has already been read, and that len(dst)
|
||||||
|
// equals that length.
|
||||||
|
//
|
||||||
|
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
|
||||||
|
func decode(dst, src []byte) int {
|
||||||
|
var d, s, offset, length int
|
||||||
|
for s < len(src) {
|
||||||
|
switch src[s] & 0x03 {
|
||||||
|
case tagLiteral:
|
||||||
|
x := uint32(src[s] >> 2)
|
||||||
|
switch {
|
||||||
|
case x < 60:
|
||||||
|
s++
|
||||||
|
case x == 60:
|
||||||
|
s += 2
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-1])
|
||||||
|
case x == 61:
|
||||||
|
s += 3
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||||
|
case x == 62:
|
||||||
|
s += 4
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||||
|
case x == 63:
|
||||||
|
s += 5
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||||
|
}
|
||||||
|
length = int(x) + 1
|
||||||
|
if length <= 0 {
|
||||||
|
return decodeErrCodeUnsupportedLiteralLength
|
||||||
|
}
|
||||||
|
if length > len(dst)-d || length > len(src)-s {
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
copy(dst[d:], src[s:s+length])
|
||||||
|
d += length
|
||||||
|
s += length
|
||||||
|
continue
|
||||||
|
|
||||||
|
case tagCopy1:
|
||||||
|
s += 2
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = 4 + int(src[s-2])>>2&0x7
|
||||||
|
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||||
|
|
||||||
|
case tagCopy2:
|
||||||
|
s += 3
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = 1 + int(src[s-3])>>2
|
||||||
|
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||||
|
|
||||||
|
case tagCopy4:
|
||||||
|
s += 5
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = 1 + int(src[s-5])>>2
|
||||||
|
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
|
||||||
|
// the built-in copy function, this byte-by-byte copy always runs
|
||||||
|
// forwards, even if the slices overlap. Conceptually, this is:
|
||||||
|
//
|
||||||
|
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||||
|
for end := d + length; d != end; d++ {
|
||||||
|
dst[d] = dst[d-offset]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d != len(dst) {
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
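The decode function above reverses what Encode produces for the block format: a varint length prefix followed by literal and copy chunks. Below is a minimal round-trip sketch, assuming the vendored package is importable under its canonical path github.com/golang/snappy.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// Highly repetitive input, so the copy tags do most of the work.
	src := bytes.Repeat([]byte("vault2env "), 100)

	// Encode prepends the varint-encoded decompressed length, then emits
	// literal and copy chunks; Decode reverses that.
	enc := snappy.Encode(nil, src)
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), len(enc), bytes.Equal(src, dec)) // 1000, much smaller, true
}
```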
285
vendor/github.com/golang/snappy/encode.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||||
|
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
func Encode(dst, src []byte) []byte {
|
||||||
|
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||||
|
panic(ErrTooLarge)
|
||||||
|
} else if len(dst) < n {
|
||||||
|
dst = make([]byte, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||||
|
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||||
|
|
||||||
|
for len(src) > 0 {
|
||||||
|
p := src
|
||||||
|
src = nil
|
||||||
|
if len(p) > maxBlockSize {
|
||||||
|
p, src = p[:maxBlockSize], p[maxBlockSize:]
|
||||||
|
}
|
||||||
|
if len(p) < minNonLiteralBlockSize {
|
||||||
|
d += emitLiteral(dst[d:], p)
|
||||||
|
} else {
|
||||||
|
d += encodeBlock(dst[d:], p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst[:d]
|
||||||
|
}
|
||||||
|
|
||||||
|
// inputMargin is the minimum number of extra input bytes to keep, inside
|
||||||
|
// encodeBlock's inner loop. On some architectures, this margin lets us
|
||||||
|
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
|
||||||
|
// literals can be implemented as a single load to and store from a 16-byte
|
||||||
|
// register. That literal's actual length can be as short as 1 byte, so this
|
||||||
|
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
|
||||||
|
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
|
||||||
|
// that we don't overrun the dst and src buffers.
|
||||||
|
const inputMargin = 16 - 1
|
||||||
|
|
||||||
|
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
|
||||||
|
// could be encoded with a copy tag. This is the minimum with respect to the
|
||||||
|
// algorithm used by encodeBlock, not a minimum enforced by the file format.
|
||||||
|
//
|
||||||
|
// The encoded output must start with at least a 1 byte literal, as there are
|
||||||
|
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
|
||||||
|
// from an emitCopy call in encodeBlock's main loop, would require at least
|
||||||
|
// another inputMargin bytes, for the reason above: we want any emitLiteral
|
||||||
|
// calls inside encodeBlock's main loop to use the fast path if possible, which
|
||||||
|
// requires being able to overrun by inputMargin bytes. Thus,
|
||||||
|
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
|
||||||
|
//
|
||||||
|
// The C++ code doesn't use this exact threshold, but it could, as discussed at
|
||||||
|
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
|
||||||
|
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
|
||||||
|
// optimization. It should not affect the encoded form. This is tested by
|
||||||
|
// TestSameEncodingAsCppShortCopies.
|
||||||
|
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||||
|
|
||||||
|
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||||
|
// uncompressed length.
|
||||||
|
//
|
||||||
|
// It will return a negative value if srcLen is too large to encode.
|
||||||
|
func MaxEncodedLen(srcLen int) int {
|
||||||
|
n := uint64(srcLen)
|
||||||
|
if n > 0xffffffff {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
// Compressed data can be defined as:
|
||||||
|
// compressed := item* literal*
|
||||||
|
// item := literal* copy
|
||||||
|
//
|
||||||
|
// The trailing literal sequence has a space blowup of at most 62/60
|
||||||
|
// since a literal of length 60 needs one tag byte + one extra byte
|
||||||
|
// for length information.
|
||||||
|
//
|
||||||
|
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||||
|
// 4 bytes of data. Because of a special check in the encoding code,
|
||||||
|
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||||
|
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||||
|
// to at most the 62/60 blowup for representing literals.
|
||||||
|
//
|
||||||
|
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||||
|
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||||
|
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||||
|
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||||
|
//
|
||||||
|
// This last factor dominates the blowup, so the final estimate is:
|
||||||
|
n = 32 + n + n/6
|
||||||
|
if n > 0xffffffff {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return int(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
var errClosed = errors.New("snappy: Writer is closed")
|
||||||
|
|
||||||
|
// NewWriter returns a new Writer that compresses to w.
|
||||||
|
//
|
||||||
|
// The Writer returned does not buffer writes. There is no need to Flush or
|
||||||
|
// Close such a Writer.
|
||||||
|
//
|
||||||
|
// Deprecated: the Writer returned is not suitable for many small writes, only
|
||||||
|
// for few large writes. Use NewBufferedWriter instead, which is efficient
|
||||||
|
// regardless of the frequency and shape of the writes, and remember to Close
|
||||||
|
// that Writer when done.
|
||||||
|
func NewWriter(w io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
obuf: make([]byte, obufLen),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBufferedWriter returns a new Writer that compresses to w, using the
|
||||||
|
// framing format described at
|
||||||
|
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||||
|
//
|
||||||
|
// The Writer returned buffers writes. Users must call Close to guarantee all
|
||||||
|
// data has been forwarded to the underlying io.Writer. They may also call
|
||||||
|
// Flush zero or more times before calling Close.
|
||||||
|
func NewBufferedWriter(w io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
ibuf: make([]byte, 0, maxBlockSize),
|
||||||
|
obuf: make([]byte, obufLen),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer is an io.Writer that can write Snappy-compressed bytes.
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
err error
|
||||||
|
|
||||||
|
// ibuf is a buffer for the incoming (uncompressed) bytes.
|
||||||
|
//
|
||||||
|
// Its use is optional. For backwards compatibility, Writers created by the
|
||||||
|
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
|
||||||
|
// therefore do not need to be Flush'ed or Close'd.
|
||||||
|
ibuf []byte
|
||||||
|
|
||||||
|
// obuf is a buffer for the outgoing (compressed) bytes.
|
||||||
|
obuf []byte
|
||||||
|
|
||||||
|
// wroteStreamHeader is whether we have written the stream header.
|
||||||
|
wroteStreamHeader bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||||
|
// w. This permits reusing a Writer rather than allocating a new one.
|
||||||
|
func (w *Writer) Reset(writer io.Writer) {
|
||||||
|
w.w = writer
|
||||||
|
w.err = nil
|
||||||
|
if w.ibuf != nil {
|
||||||
|
w.ibuf = w.ibuf[:0]
|
||||||
|
}
|
||||||
|
w.wroteStreamHeader = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write satisfies the io.Writer interface.
|
||||||
|
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
|
||||||
|
if w.ibuf == nil {
|
||||||
|
// Do not buffer incoming bytes. This does not perform or compress well
|
||||||
|
// if the caller of Writer.Write writes many small slices. This
|
||||||
|
// behavior is therefore deprecated, but still supported for backwards
|
||||||
|
// compatibility with code that doesn't explicitly Flush or Close.
|
||||||
|
return w.write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The remainder of this method is based on bufio.Writer.Write from the
|
||||||
|
// standard library.
|
||||||
|
|
||||||
|
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
|
||||||
|
var n int
|
||||||
|
if len(w.ibuf) == 0 {
|
||||||
|
// Large write, empty buffer.
|
||||||
|
// Write directly from p to avoid copy.
|
||||||
|
n, _ = w.write(p)
|
||||||
|
} else {
|
||||||
|
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||||
|
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||||
|
w.Flush()
|
||||||
|
}
|
||||||
|
nRet += n
|
||||||
|
p = p[n:]
|
||||||
|
}
|
||||||
|
if w.err != nil {
|
||||||
|
return nRet, w.err
|
||||||
|
}
|
||||||
|
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||||
|
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||||
|
nRet += n
|
||||||
|
return nRet, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) write(p []byte) (nRet int, errRet error) {
|
||||||
|
if w.err != nil {
|
||||||
|
return 0, w.err
|
||||||
|
}
|
||||||
|
for len(p) > 0 {
|
||||||
|
obufStart := len(magicChunk)
|
||||||
|
if !w.wroteStreamHeader {
|
||||||
|
w.wroteStreamHeader = true
|
||||||
|
copy(w.obuf, magicChunk)
|
||||||
|
obufStart = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var uncompressed []byte
|
||||||
|
if len(p) > maxBlockSize {
|
||||||
|
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
|
||||||
|
} else {
|
||||||
|
uncompressed, p = p, nil
|
||||||
|
}
|
||||||
|
checksum := crc(uncompressed)
|
||||||
|
|
||||||
|
// Compress the buffer, discarding the result if the improvement
|
||||||
|
// isn't at least 12.5%.
|
||||||
|
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
|
||||||
|
chunkType := uint8(chunkTypeCompressedData)
|
||||||
|
chunkLen := 4 + len(compressed)
|
||||||
|
obufEnd := obufHeaderLen + len(compressed)
|
||||||
|
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
|
||||||
|
chunkType = chunkTypeUncompressedData
|
||||||
|
chunkLen = 4 + len(uncompressed)
|
||||||
|
obufEnd = obufHeaderLen
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fill in the per-chunk header that comes before the body.
|
||||||
|
w.obuf[len(magicChunk)+0] = chunkType
|
||||||
|
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
|
||||||
|
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
|
||||||
|
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
|
||||||
|
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
|
||||||
|
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
|
||||||
|
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
|
||||||
|
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
|
||||||
|
|
||||||
|
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return nRet, err
|
||||||
|
}
|
||||||
|
if chunkType == chunkTypeUncompressedData {
|
||||||
|
if _, err := w.w.Write(uncompressed); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return nRet, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nRet += len(uncompressed)
|
||||||
|
}
|
||||||
|
return nRet, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush flushes the Writer to its underlying io.Writer.
|
||||||
|
func (w *Writer) Flush() error {
|
||||||
|
if w.err != nil {
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
if len(w.ibuf) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.write(w.ibuf)
|
||||||
|
w.ibuf = w.ibuf[:0]
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close calls Flush and then closes the Writer.
|
||||||
|
func (w *Writer) Close() error {
|
||||||
|
w.Flush()
|
||||||
|
ret := w.err
|
||||||
|
if w.err == nil {
|
||||||
|
w.err = errClosed
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
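encode.go above defines both the block-format Encode function and the stream-format Writer, and its comments recommend NewBufferedWriter over the deprecated NewWriter for many small writes. The sketch below shows that intended usage, assuming the vendored package is importable as github.com/golang/snappy.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// The buffered writer batches small writes into maxBlockSize chunks, so
	// it must be Closed (or Flushed) to push out the final partial chunk.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte(strings.Repeat("secret=value\n", 1000))); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // length of the framed, compressed stream
}
```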
29
vendor/github.com/golang/snappy/encode_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
730
vendor/github.com/golang/snappy/encode_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,730 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !noasm
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
|
||||||
|
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
|
||||||
|
// https://github.com/golang/snappy/issues/29
|
||||||
|
//
|
||||||
|
// As a workaround, the package was built with a known good assembler, and
|
||||||
|
// those instructions were disassembled by "objdump -d" to yield the
|
||||||
|
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||||
|
// style comments, in AT&T asm syntax. Note that rsp here is a physical
|
||||||
|
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
|
||||||
|
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
|
||||||
|
// fine on Go 1.6.
|
||||||
|
|
||||||
|
// The asm code generally follows the pure Go code in encode_other.go, except
|
||||||
|
// where marked with a "!!!".
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func emitLiteral(dst, lit []byte) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The register allocation:
|
||||||
|
// - AX len(lit)
|
||||||
|
// - BX n
|
||||||
|
// - DX return value
|
||||||
|
// - DI &dst[i]
|
||||||
|
// - R10 &lit[0]
|
||||||
|
//
|
||||||
|
// The 24 bytes of stack space is to call runtime·memmove.
|
||||||
|
//
|
||||||
|
// The unusual register allocation of local variables, such as R10 for the
|
||||||
|
// source pointer, matches the allocation used at the call site in encodeBlock,
|
||||||
|
// which makes it easier to manually inline this function.
|
||||||
|
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ lit_base+24(FP), R10
|
||||||
|
MOVQ lit_len+32(FP), AX
|
||||||
|
MOVQ AX, DX
|
||||||
|
MOVL AX, BX
|
||||||
|
SUBL $1, BX
|
||||||
|
|
||||||
|
CMPL BX, $60
|
||||||
|
JLT oneByte
|
||||||
|
CMPL BX, $256
|
||||||
|
JLT twoBytes
|
||||||
|
|
||||||
|
threeBytes:
|
||||||
|
MOVB $0xf4, 0(DI)
|
||||||
|
MOVW BX, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
ADDQ $3, DX
|
||||||
|
JMP memmove
|
||||||
|
|
||||||
|
twoBytes:
|
||||||
|
MOVB $0xf0, 0(DI)
|
||||||
|
MOVB BX, 1(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
ADDQ $2, DX
|
||||||
|
JMP memmove
|
||||||
|
|
||||||
|
oneByte:
|
||||||
|
SHLB $2, BX
|
||||||
|
MOVB BX, 0(DI)
|
||||||
|
ADDQ $1, DI
|
||||||
|
ADDQ $1, DX
|
||||||
|
|
||||||
|
memmove:
|
||||||
|
MOVQ DX, ret+48(FP)
|
||||||
|
|
||||||
|
// copy(dst[i:], lit)
|
||||||
|
//
|
||||||
|
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||||
|
// DI, R10 and AX as arguments.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ R10, 8(SP)
|
||||||
|
MOVQ AX, 16(SP)
|
||||||
|
CALL runtime·memmove(SB)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func emitCopy(dst []byte, offset, length int) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The register allocation:
|
||||||
|
// - AX length
|
||||||
|
// - SI &dst[0]
|
||||||
|
// - DI &dst[i]
|
||||||
|
// - R11 offset
|
||||||
|
//
|
||||||
|
// The unusual register allocation of local variables, such as R11 for the
|
||||||
|
// offset, matches the allocation used at the call site in encodeBlock, which
|
||||||
|
// makes it easier to manually inline this function.
|
||||||
|
TEXT ·emitCopy(SB), NOSPLIT, $0-48
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ DI, SI
|
||||||
|
MOVQ offset+24(FP), R11
|
||||||
|
MOVQ length+32(FP), AX
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
// for length >= 68 { etc }
|
||||||
|
CMPL AX, $68
|
||||||
|
JLT step1
|
||||||
|
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xfe, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $64, AX
|
||||||
|
JMP loop0
|
||||||
|
|
||||||
|
step1:
|
||||||
|
// if length > 64 { etc }
|
||||||
|
CMPL AX, $64
|
||||||
|
JLE step2
|
||||||
|
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xee, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $60, AX
|
||||||
|
|
||||||
|
step2:
|
||||||
|
// if length >= 12 || offset >= 2048 { goto step3 }
|
||||||
|
CMPL AX, $12
|
||||||
|
JGE step3
|
||||||
|
CMPL R11, $2048
|
||||||
|
JGE step3
|
||||||
|
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
MOVB R11, 1(DI)
|
||||||
|
SHRL $8, R11
|
||||||
|
SHLB $5, R11
|
||||||
|
SUBB $4, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB AX, R11
|
||||||
|
ORB $1, R11
|
||||||
|
MOVB R11, 0(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
|
||||||
|
// Return the number of bytes written.
|
||||||
|
SUBQ SI, DI
|
||||||
|
MOVQ DI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
step3:
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
SUBL $1, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB $2, AX
|
||||||
|
MOVB AX, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
|
||||||
|
// Return the number of bytes written.
|
||||||
|
SUBQ SI, DI
|
||||||
|
MOVQ DI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func extendMatch(src []byte, i, j int) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The register allocation:
|
||||||
|
// - DX &src[0]
|
||||||
|
// - SI &src[j]
|
||||||
|
// - R13 &src[len(src) - 8]
|
||||||
|
// - R14 &src[len(src)]
|
||||||
|
// - R15 &src[i]
|
||||||
|
//
|
||||||
|
// The unusual register allocation of local variables, such as R15 for a source
|
||||||
|
// pointer, matches the allocation used at the call site in encodeBlock, which
|
||||||
|
// makes it easier to manually inline this function.
|
||||||
|
TEXT ·extendMatch(SB), NOSPLIT, $0-48
|
||||||
|
MOVQ src_base+0(FP), DX
|
||||||
|
MOVQ src_len+8(FP), R14
|
||||||
|
MOVQ i+24(FP), R15
|
||||||
|
MOVQ j+32(FP), SI
|
||||||
|
ADDQ DX, R14
|
||||||
|
ADDQ DX, R15
|
||||||
|
ADDQ DX, SI
|
||||||
|
MOVQ R14, R13
|
||||||
|
SUBQ $8, R13
|
||||||
|
|
||||||
|
cmp8:
|
||||||
|
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||||
|
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||||
|
CMPQ SI, R13
|
||||||
|
JA cmp1
|
||||||
|
MOVQ (R15), AX
|
||||||
|
MOVQ (SI), BX
|
||||||
|
CMPQ AX, BX
|
||||||
|
JNE bsf
|
||||||
|
ADDQ $8, R15
|
||||||
|
ADDQ $8, SI
|
||||||
|
JMP cmp8
|
||||||
|
|
||||||
|
bsf:
|
||||||
|
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||||
|
// the index of the first byte that differs. The BSF instruction finds the
|
||||||
|
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||||
|
// the shift by 3 converts a bit index to a byte index.
|
||||||
|
XORQ AX, BX
|
||||||
|
BSFQ BX, BX
|
||||||
|
SHRQ $3, BX
|
||||||
|
ADDQ BX, SI
|
||||||
|
|
||||||
|
// Convert from &src[ret] to ret.
|
||||||
|
SUBQ DX, SI
|
||||||
|
MOVQ SI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
cmp1:
|
||||||
|
// In src's tail, compare 1 byte at a time.
|
||||||
|
CMPQ SI, R14
|
||||||
|
JAE extendMatchEnd
|
||||||
|
MOVB (R15), AX
|
||||||
|
MOVB (SI), BX
|
||||||
|
CMPB AX, BX
|
||||||
|
JNE extendMatchEnd
|
||||||
|
ADDQ $1, R15
|
||||||
|
ADDQ $1, SI
|
||||||
|
JMP cmp1
|
||||||
|
|
||||||
|
extendMatchEnd:
|
||||||
|
// Convert from &src[ret] to ret.
|
||||||
|
SUBQ DX, SI
|
||||||
|
MOVQ SI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func encodeBlock(dst, src []byte) (d int)
|
||||||
|
//
|
||||||
|
// All local variables fit into registers, other than "var table". The register
|
||||||
|
// allocation:
|
||||||
|
// - AX . .
|
||||||
|
// - BX . .
|
||||||
|
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
|
||||||
|
// - DX 64 &src[0], tableSize
|
||||||
|
// - SI 72 &src[s]
|
||||||
|
// - DI 80 &dst[d]
|
||||||
|
// - R9 88 sLimit
|
||||||
|
// - R10 . &src[nextEmit]
|
||||||
|
// - R11 96 prevHash, currHash, nextHash, offset
|
||||||
|
// - R12 104 &src[base], skip
|
||||||
|
// - R13 . &src[nextS], &src[len(src) - 8]
|
||||||
|
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
|
||||||
|
// - R15 112 candidate
|
||||||
|
//
|
||||||
|
// The second column (56, 64, etc) is the stack offset to spill the registers
|
||||||
|
// when calling other functions. We could pack this slightly tighter, but it's
|
||||||
|
// simpler to have a dedicated spill map independent of the function called.
|
||||||
|
//
|
||||||
|
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
|
||||||
|
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
|
||||||
|
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
|
||||||
|
TEXT ·encodeBlock(SB), 0, $32888-56
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ src_base+24(FP), SI
|
||||||
|
MOVQ src_len+32(FP), R14
|
||||||
|
|
||||||
|
// shift, tableSize := uint32(32-8), 1<<8
|
||||||
|
MOVQ $24, CX
|
||||||
|
MOVQ $256, DX
|
||||||
|
|
||||||
|
calcShift:
|
||||||
|
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||||
|
// shift--
|
||||||
|
// }
|
||||||
|
CMPQ DX, $16384
|
||||||
|
JGE varTable
|
||||||
|
CMPQ DX, R14
|
||||||
|
JGE varTable
|
||||||
|
SUBQ $1, CX
|
||||||
|
SHLQ $1, DX
|
||||||
|
JMP calcShift
|
||||||
|
|
||||||
|
varTable:
|
||||||
|
// var table [maxTableSize]uint16
|
||||||
|
//
|
||||||
|
// In the asm code, unlike the Go code, we can zero-initialize only the
|
||||||
|
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
|
||||||
|
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
|
||||||
|
// 2048 writes that would zero-initialize all of table's 32768 bytes.
|
||||||
|
SHRQ $3, DX
|
||||||
|
LEAQ table-32768(SP), BX
|
||||||
|
PXOR X0, X0
|
||||||
|
|
||||||
|
memclr:
|
||||||
|
MOVOU X0, 0(BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
SUBQ $1, DX
|
||||||
|
JNZ memclr
|
||||||
|
|
||||||
|
// !!! DX = &src[0]
|
||||||
|
MOVQ SI, DX
|
||||||
|
|
||||||
|
// sLimit := len(src) - inputMargin
|
||||||
|
MOVQ R14, R9
|
||||||
|
SUBQ $15, R9
|
||||||
|
|
||||||
|
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
|
||||||
|
// change for the rest of the function.
|
||||||
|
MOVQ CX, 56(SP)
|
||||||
|
MOVQ DX, 64(SP)
|
||||||
|
MOVQ R9, 88(SP)
|
||||||
|
|
||||||
|
// nextEmit := 0
|
||||||
|
MOVQ DX, R10
|
||||||
|
|
||||||
|
// s := 1
|
||||||
|
ADDQ $1, SI
|
||||||
|
|
||||||
|
// nextHash := hash(load32(src, s), shift)
|
||||||
|
MOVL 0(SI), R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
outer:
|
||||||
|
// for { etc }
|
||||||
|
|
||||||
|
// skip := 32
|
||||||
|
MOVQ $32, R12
|
||||||
|
|
||||||
|
// nextS := s
|
||||||
|
MOVQ SI, R13
|
||||||
|
|
||||||
|
// candidate := 0
|
||||||
|
MOVQ $0, R15
|
||||||
|
|
||||||
|
inner0:
|
||||||
|
// for { etc }
|
||||||
|
|
||||||
|
// s := nextS
|
||||||
|
MOVQ R13, SI
|
||||||
|
|
||||||
|
// bytesBetweenHashLookups := skip >> 5
|
||||||
|
MOVQ R12, R14
|
||||||
|
SHRQ $5, R14
|
||||||
|
|
||||||
|
// nextS = s + bytesBetweenHashLookups
|
||||||
|
ADDQ R14, R13
|
||||||
|
|
||||||
|
// skip += bytesBetweenHashLookups
|
||||||
|
ADDQ R14, R12
|
||||||
|
|
||||||
|
// if nextS > sLimit { goto emitRemainder }
|
||||||
|
MOVQ R13, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
CMPQ AX, R9
|
||||||
|
JA emitRemainder
|
||||||
|
|
||||||
|
// candidate = int(table[nextHash])
|
||||||
|
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||||
|
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||||
|
BYTE $0x4e
|
||||||
|
BYTE $0x0f
|
||||||
|
BYTE $0xb7
|
||||||
|
BYTE $0x7c
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// table[nextHash] = uint16(s)
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
|
||||||
|
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||||
|
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||||
|
BYTE $0x66
|
||||||
|
BYTE $0x42
|
||||||
|
BYTE $0x89
|
||||||
|
BYTE $0x44
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// nextHash = hash(load32(src, nextS), shift)
|
||||||
|
MOVL 0(R13), R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// if load32(src, s) != load32(src, candidate) { continue } break
|
||||||
|
MOVL 0(SI), AX
|
||||||
|
MOVL (DX)(R15*1), BX
|
||||||
|
CMPL AX, BX
|
||||||
|
JNE inner0
|
||||||
|
|
||||||
|
fourByteMatch:
|
||||||
|
// As per the encode_other.go code:
|
||||||
|
//
|
||||||
|
// A 4-byte match has been found. We'll later see etc.
|
||||||
|
|
||||||
|
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
|
||||||
|
// on inputMargin in encode.go.
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ R10, AX
|
||||||
|
CMPQ AX, $16
|
||||||
|
JLE emitLiteralFastPath
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// Begin inline of the emitLiteral call.
|
||||||
|
//
|
||||||
|
// d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
|
||||||
|
MOVL AX, BX
|
||||||
|
SUBL $1, BX
|
||||||
|
|
||||||
|
CMPL BX, $60
|
||||||
|
JLT inlineEmitLiteralOneByte
|
||||||
|
CMPL BX, $256
|
||||||
|
JLT inlineEmitLiteralTwoBytes
|
||||||
|
|
||||||
|
inlineEmitLiteralThreeBytes:
|
||||||
|
MOVB $0xf4, 0(DI)
|
||||||
|
MOVW BX, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
JMP inlineEmitLiteralMemmove
|
||||||
|
|
||||||
|
inlineEmitLiteralTwoBytes:
|
||||||
|
MOVB $0xf0, 0(DI)
|
||||||
|
MOVB BX, 1(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
JMP inlineEmitLiteralMemmove
|
||||||
|
|
||||||
|
inlineEmitLiteralOneByte:
|
||||||
|
SHLB $2, BX
|
||||||
|
MOVB BX, 0(DI)
|
||||||
|
ADDQ $1, DI
|
||||||
|
|
||||||
|
inlineEmitLiteralMemmove:
|
||||||
|
// Spill local variables (registers) onto the stack; call; unspill.
|
||||||
|
//
|
||||||
|
// copy(dst[i:], lit)
|
||||||
|
//
|
||||||
|
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||||
|
// DI, R10 and AX as arguments.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ R10, 8(SP)
|
||||||
|
MOVQ AX, 16(SP)
|
||||||
|
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||||
|
MOVQ SI, 72(SP)
|
||||||
|
MOVQ DI, 80(SP)
|
||||||
|
MOVQ R15, 112(SP)
|
||||||
|
CALL runtime·memmove(SB)
|
||||||
|
MOVQ 56(SP), CX
|
||||||
|
MOVQ 64(SP), DX
|
||||||
|
MOVQ 72(SP), SI
|
||||||
|
MOVQ 80(SP), DI
|
||||||
|
MOVQ 88(SP), R9
|
||||||
|
MOVQ 112(SP), R15
|
||||||
|
JMP inner1
|
||||||
|
|
||||||
|
inlineEmitLiteralEnd:
|
||||||
|
// End inline of the emitLiteral call.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
emitLiteralFastPath:
|
||||||
|
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
|
||||||
|
MOVB AX, BX
|
||||||
|
SUBB $1, BX
|
||||||
|
SHLB $2, BX
|
||||||
|
MOVB BX, (DI)
|
||||||
|
ADDQ $1, DI
|
||||||
|
|
||||||
|
// !!! Implement the copy from lit to dst as a 16-byte load and store.
|
||||||
|
// (Encode's documentation says that dst and src must not overlap.)
|
||||||
|
//
|
||||||
|
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
|
||||||
|
// OK. Subsequent iterations will fix up the overrun.
|
||||||
|
//
|
||||||
|
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||||
|
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||||
|
// effective on architectures that are fussier about alignment.
|
||||||
|
MOVOU 0(R10), X0
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
ADDQ AX, DI
|
||||||
|
|
||||||
|
inner1:
|
||||||
|
// for { etc }
|
||||||
|
|
||||||
|
// base := s
|
||||||
|
MOVQ SI, R12
|
||||||
|
|
||||||
|
// !!! offset := base - candidate
|
||||||
|
MOVQ R12, R11
|
||||||
|
SUBQ R15, R11
|
||||||
|
SUBQ DX, R11
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// Begin inline of the extendMatch call.
|
||||||
|
//
|
||||||
|
// s = extendMatch(src, candidate+4, s+4)
|
||||||
|
|
||||||
|
// !!! R14 = &src[len(src)]
|
||||||
|
MOVQ src_len+32(FP), R14
|
||||||
|
ADDQ DX, R14
|
||||||
|
|
||||||
|
// !!! R13 = &src[len(src) - 8]
|
||||||
|
MOVQ R14, R13
|
||||||
|
SUBQ $8, R13
|
||||||
|
|
||||||
|
// !!! R15 = &src[candidate + 4]
|
||||||
|
ADDQ $4, R15
|
||||||
|
ADDQ DX, R15
|
||||||
|
|
||||||
|
// !!! s += 4
|
||||||
|
ADDQ $4, SI
|
||||||
|
|
||||||
|
inlineExtendMatchCmp8:
|
||||||
|
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||||
|
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||||
|
CMPQ SI, R13
|
||||||
|
JA inlineExtendMatchCmp1
|
||||||
|
MOVQ (R15), AX
|
||||||
|
MOVQ (SI), BX
|
||||||
|
CMPQ AX, BX
|
||||||
|
JNE inlineExtendMatchBSF
|
||||||
|
ADDQ $8, R15
|
||||||
|
ADDQ $8, SI
|
||||||
|
JMP inlineExtendMatchCmp8
|
||||||
|
|
||||||
|
inlineExtendMatchBSF:
|
||||||
|
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||||
|
// the index of the first byte that differs. The BSF instruction finds the
|
||||||
|
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||||
|
// the shift by 3 converts a bit index to a byte index.
|
||||||
|
XORQ AX, BX
|
||||||
|
BSFQ BX, BX
|
||||||
|
SHRQ $3, BX
|
||||||
|
ADDQ BX, SI
|
||||||
|
JMP inlineExtendMatchEnd
|
||||||
|
|
||||||
|
inlineExtendMatchCmp1:
|
||||||
|
// In src's tail, compare 1 byte at a time.
|
||||||
|
CMPQ SI, R14
|
||||||
|
JAE inlineExtendMatchEnd
|
||||||
|
MOVB (R15), AX
|
||||||
|
MOVB (SI), BX
|
||||||
|
CMPB AX, BX
|
||||||
|
JNE inlineExtendMatchEnd
|
||||||
|
ADDQ $1, R15
|
||||||
|
ADDQ $1, SI
|
||||||
|
JMP inlineExtendMatchCmp1
|
||||||
|
|
||||||
|
inlineExtendMatchEnd:
|
||||||
|
// End inline of the extendMatch call.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// Begin inline of the emitCopy call.
|
||||||
|
//
|
||||||
|
// d += emitCopy(dst[d:], base-candidate, s-base)
|
||||||
|
|
||||||
|
// !!! length := s - base
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ R12, AX
|
||||||
|
|
||||||
|
inlineEmitCopyLoop0:
|
||||||
|
// for length >= 68 { etc }
|
||||||
|
CMPL AX, $68
|
||||||
|
JLT inlineEmitCopyStep1
|
||||||
|
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xfe, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $64, AX
|
||||||
|
JMP inlineEmitCopyLoop0
|
||||||
|
|
||||||
|
inlineEmitCopyStep1:
|
||||||
|
// if length > 64 { etc }
|
||||||
|
CMPL AX, $64
|
||||||
|
JLE inlineEmitCopyStep2
|
||||||
|
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xee, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $60, AX
|
||||||
|
|
||||||
|
inlineEmitCopyStep2:
|
||||||
|
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
|
||||||
|
CMPL AX, $12
|
||||||
|
JGE inlineEmitCopyStep3
|
||||||
|
CMPL R11, $2048
|
||||||
|
JGE inlineEmitCopyStep3
|
||||||
|
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
MOVB R11, 1(DI)
|
||||||
|
SHRL $8, R11
|
||||||
|
SHLB $5, R11
|
||||||
|
SUBB $4, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB AX, R11
|
||||||
|
ORB $1, R11
|
||||||
|
MOVB R11, 0(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
JMP inlineEmitCopyEnd
|
||||||
|
|
||||||
|
inlineEmitCopyStep3:
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
SUBL $1, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB $2, AX
|
||||||
|
MOVB AX, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
|
||||||
|
inlineEmitCopyEnd:
|
||||||
|
// End inline of the emitCopy call.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
// nextEmit = s
|
||||||
|
MOVQ SI, R10
|
||||||
|
|
||||||
|
// if s >= sLimit { goto emitRemainder }
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
CMPQ AX, R9
|
||||||
|
JAE emitRemainder
|
||||||
|
|
||||||
|
// As per the encode_other.go code:
|
||||||
|
//
|
||||||
|
// We could immediately etc.
|
||||||
|
|
||||||
|
// x := load64(src, s-1)
|
||||||
|
MOVQ -1(SI), R14
|
||||||
|
|
||||||
|
// prevHash := hash(uint32(x>>0), shift)
|
||||||
|
MOVL R14, R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// table[prevHash] = uint16(s-1)
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
SUBQ $1, AX
|
||||||
|
|
||||||
|
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||||
|
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||||
|
BYTE $0x66
|
||||||
|
BYTE $0x42
|
||||||
|
BYTE $0x89
|
||||||
|
BYTE $0x44
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// currHash := hash(uint32(x>>8), shift)
|
||||||
|
SHRQ $8, R14
|
||||||
|
MOVL R14, R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// candidate = int(table[currHash])
|
||||||
|
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||||
|
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||||
|
BYTE $0x4e
|
||||||
|
BYTE $0x0f
|
||||||
|
BYTE $0xb7
|
||||||
|
BYTE $0x7c
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// table[currHash] = uint16(s)
|
||||||
|
ADDQ $1, AX
|
||||||
|
|
||||||
|
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||||
|
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||||
|
BYTE $0x66
|
||||||
|
BYTE $0x42
|
||||||
|
BYTE $0x89
|
||||||
|
BYTE $0x44
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// if uint32(x>>8) == load32(src, candidate) { continue }
|
||||||
|
MOVL (DX)(R15*1), BX
|
||||||
|
CMPL R14, BX
|
||||||
|
JEQ inner1
|
||||||
|
|
||||||
|
// nextHash = hash(uint32(x>>16), shift)
|
||||||
|
SHRQ $8, R14
|
||||||
|
MOVL R14, R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// s++
|
||||||
|
ADDQ $1, SI
|
||||||
|
|
||||||
|
// break out of the inner1 for loop, i.e. continue the outer loop.
|
||||||
|
JMP outer
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
// if nextEmit < len(src) { etc }
|
||||||
|
MOVQ src_len+32(FP), AX
|
||||||
|
ADDQ DX, AX
|
||||||
|
CMPQ R10, AX
|
||||||
|
JEQ encodeBlockEnd
|
||||||
|
|
||||||
|
// d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
|
//
|
||||||
|
// Push args.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||||
|
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||||
|
MOVQ R10, 24(SP)
|
||||||
|
SUBQ R10, AX
|
||||||
|
MOVQ AX, 32(SP)
|
||||||
|
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||||
|
|
||||||
|
// Spill local variables (registers) onto the stack; call; unspill.
|
||||||
|
MOVQ DI, 80(SP)
|
||||||
|
CALL ·emitLiteral(SB)
|
||||||
|
MOVQ 80(SP), DI
|
||||||
|
|
||||||
|
// Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||||
|
ADDQ 48(SP), DI
|
||||||
|
|
||||||
|
encodeBlockEnd:
|
||||||
|
MOVQ dst_base+0(FP), AX
|
||||||
|
SUBQ AX, DI
|
||||||
|
MOVQ DI, d+48(FP)
|
||||||
|
RET
|
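The calcShift block in the assembly above computes the hash shift and table size in registers, mirroring the Go loop "shift, tableSize := uint32(32-8), 1<<8; for tableSize < maxTableSize && tableSize < len(src) { shift--; tableSize *= 2 }". Below is a minimal Go sketch of that calculation together with the multiplicative hash it feeds; the helper name tableParams is illustrative only.

```go
package main

import "fmt"

const maxTableSize = 1 << 14

// hash mixes a 32-bit load with the Snappy multiplier and keeps the top
// (32 - shift) bits, so a larger table gets a smaller shift.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// tableParams doubles the table up to maxTableSize while the source is
// larger, decrementing the shift once per doubling.
func tableParams(srcLen int) (shift uint32, tableSize int) {
	shift, tableSize = 32-8, 1<<8
	for ; tableSize < maxTableSize && tableSize < srcLen; tableSize *= 2 {
		shift--
	}
	return shift, tableSize
}

func main() {
	for _, n := range []int{100, 4096, 100000} {
		shift, size := tableParams(n)
		fmt.Println(n, size, shift, hash(0x12345678, shift))
	}
}
```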
238
vendor/github.com/golang/snappy/encode_other.go
generated
vendored
Normal file
@@ -0,0 +1,238 @@
|
||||||
|
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !amd64 appengine !gc noasm
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
func load32(b []byte, i int) uint32 {
|
||||||
|
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||||
|
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||||
|
}
|
||||||
|
|
||||||
|
func load64(b []byte, i int) uint64 {
|
||||||
|
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||||
|
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||||
|
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 1 <= len(lit) && len(lit) <= 65536
|
||||||
|
func emitLiteral(dst, lit []byte) int {
|
||||||
|
i, n := 0, uint(len(lit)-1)
|
||||||
|
switch {
|
||||||
|
case n < 60:
|
||||||
|
dst[0] = uint8(n)<<2 | tagLiteral
|
||||||
|
i = 1
|
||||||
|
case n < 1<<8:
|
||||||
|
dst[0] = 60<<2 | tagLiteral
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
i = 2
|
||||||
|
default:
|
||||||
|
dst[0] = 61<<2 | tagLiteral
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
dst[2] = uint8(n >> 8)
|
||||||
|
i = 3
|
||||||
|
}
|
||||||
|
return i + copy(dst[i:], lit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 1 <= offset && offset <= 65535
|
||||||
|
// 4 <= length && length <= 65535
|
||||||
|
func emitCopy(dst []byte, offset, length int) int {
|
||||||
|
i := 0
|
||||||
|
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
|
||||||
|
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
|
||||||
|
// length emitted down below is is a little lower (at 60 = 64 - 4), because
|
||||||
|
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
|
||||||
|
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
|
||||||
|
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
|
||||||
|
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
|
||||||
|
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
|
||||||
|
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
|
||||||
|
for length >= 68 {
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
dst[i+0] = 63<<2 | tagCopy2
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
dst[i+2] = uint8(offset >> 8)
|
||||||
|
i += 3
|
||||||
|
length -= 64
|
||||||
|
}
|
||||||
|
if length > 64 {
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
dst[i+0] = 59<<2 | tagCopy2
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
dst[i+2] = uint8(offset >> 8)
|
||||||
|
i += 3
|
||||||
|
length -= 60
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[i+0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
dst[i+2] = uint8(offset >> 8)
|
||||||
|
return i + 3
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
return i + 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// extendMatch returns the largest k such that k <= len(src) and that
|
||||||
|
// src[i:i+k-j] and src[j:k] have the same contents.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
// 0 <= i && i < j && j <= len(src)
|
||||||
|
func extendMatch(src []byte, i, j int) int {
|
||||||
|
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
||||||
|
}
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
func hash(u, shift uint32) uint32 {
|
||||||
|
return (u * 0x1e35a7bd) >> shift
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
|
// been written.
|
||||||
|
//
|
||||||
|
// It also assumes that:
|
||||||
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
|
func encodeBlock(dst, src []byte) (d int) {
|
||||||
|
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
|
||||||
|
// The table element type is uint16, as s < sLimit and sLimit < len(src)
|
||||||
|
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
|
||||||
|
const (
|
||||||
|
maxTableSize = 1 << 14
|
||||||
|
// tableMask is redundant, but helps the compiler eliminate bounds
|
||||||
|
// checks.
|
||||||
|
tableMask = maxTableSize - 1
|
||||||
|
)
|
||||||
|
shift := uint32(32 - 8)
|
||||||
|
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||||
|
shift--
|
||||||
|
}
|
||||||
|
// In Go, all array elements are zero-initialized, so there is no advantage
|
||||||
|
// to a smaller tableSize per se. However, it matches the C++ algorithm,
|
||||||
|
// and in the asm versions of this code, we can get away with zeroing only
|
||||||
|
// the first tableSize elements.
|
||||||
|
var table [maxTableSize]uint16
|
||||||
|
|
||||||
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
|
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||||
|
// looking for copies.
|
||||||
|
sLimit := len(src) - inputMargin
|
||||||
|
|
||||||
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
|
nextEmit := 0
|
||||||
|
|
||||||
|
// The encoded form must start with a literal, as there are no previous
|
||||||
|
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||||
|
s := 1
|
||||||
|
nextHash := hash(load32(src, s), shift)
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Copied from the C++ snappy implementation:
|
||||||
|
//
|
||||||
|
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||||
|
// found, start looking only at every other byte. If 32 more bytes are
|
||||||
|
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||||
|
// is found, immediately go back to looking at every byte. This is a
|
||||||
|
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||||
|
// due to more bookkeeping, but for non-compressible data (such as
|
||||||
|
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||||
|
// data is incompressible and doesn't bother looking for matches
|
||||||
|
// everywhere.
|
||||||
|
//
|
||||||
|
// The "skip" variable keeps track of how many bytes there are since
|
||||||
|
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||||
|
// the number of bytes to move ahead for each iteration.
|
||||||
|
skip := 32
|
||||||
|
|
||||||
|
nextS := s
|
||||||
|
candidate := 0
|
||||||
|
for {
|
||||||
|
s = nextS
|
||||||
|
bytesBetweenHashLookups := skip >> 5
|
||||||
|
nextS = s + bytesBetweenHashLookups
|
||||||
|
skip += bytesBetweenHashLookups
|
||||||
|
if nextS > sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
candidate = int(table[nextHash&tableMask])
|
||||||
|
table[nextHash&tableMask] = uint16(s)
|
||||||
|
nextHash = hash(load32(src, nextS), shift)
|
||||||
|
if load32(src, s) == load32(src, candidate) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
|
||||||
|
// Call emitCopy, and then see if another emitCopy could be our next
|
||||||
|
// move. Repeat until we find no match for the input immediately after
|
||||||
|
// what was consumed by the last emitCopy call.
|
||||||
|
//
|
||||||
|
// If we exit this loop normally then we need to call emitLiteral next,
|
||||||
|
// though we don't yet know how big the literal will be. We handle that
|
||||||
|
// by proceeding to the next iteration of the main loop. We also can
|
||||||
|
// exit this loop via goto if we get close to exhausting the input.
|
||||||
|
for {
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
//
|
||||||
|
// This is an inlined version of:
|
||||||
|
// s = extendMatch(src, candidate+4, s+4)
|
||||||
|
s += 4
|
||||||
|
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopy(dst[d:], base-candidate, s-base)
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
// We could immediately start working at s now, but to improve
|
||||||
|
// compression we first update the hash table at s-1 and at s. If
|
||||||
|
// another emitCopy is not our next move, also calculate nextHash
|
||||||
|
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||||
|
// are faster as one load64 call (with some shifts) instead of
|
||||||
|
// three load32 calls.
|
||||||
|
x := load64(src, s-1)
|
||||||
|
prevHash := hash(uint32(x>>0), shift)
|
||||||
|
table[prevHash&tableMask] = uint16(s - 1)
|
||||||
|
currHash := hash(uint32(x>>8), shift)
|
||||||
|
candidate = int(table[currHash&tableMask])
|
||||||
|
table[currHash&tableMask] = uint16(s)
|
||||||
|
if uint32(x>>8) != load32(src, candidate) {
|
||||||
|
nextHash = hash(uint32(x>>16), shift)
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
if nextEmit < len(src) {
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
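The emitCopy code above chooses between a 2-byte tagCopy1 encoding (short copy, offset under 2048) and a 3-byte tagCopy2 encoding. The sketch below reproduces just those two byte layouts as a worked example; the helper names copy1 and copy2 are illustrative, not part of the package.

```go
package main

import "fmt"

const (
	tagCopy1 = 0x01
	tagCopy2 = 0x02
)

// copy1 encodes a short copy (4 <= length < 12, offset < 2048) as 2 bytes:
// the high 3 offset bits and (length-4) share the tag byte.
func copy1(offset, length int) []byte {
	return []byte{
		uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1,
		uint8(offset),
	}
}

// copy2 encodes a copy with length <= 64 and a 16-bit little-endian offset
// as 3 bytes.
func copy2(offset, length int) []byte {
	return []byte{uint8(length-1)<<2 | tagCopy2, uint8(offset), uint8(offset >> 8)}
}

func main() {
	fmt.Printf("% x\n", copy1(100, 7))   // 0d 64: tagCopy1, length 7, offset 100
	fmt.Printf("% x\n", copy2(1000, 60)) // ee e8 03: tagCopy2, length 60, offset 1000
}
```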
98
vendor/github.com/golang/snappy/snappy.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"

import (
	"hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer issued by most
    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}
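The package comment above explains how a chunk's first byte splits into the 2-bit tag l and the 6-bit value m, and how the framing checksum is a masked Castagnoli CRC. Here is a small, self-contained Go sketch of both; splitTag and maskedCRC are illustrative names, not exported API.

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// splitTag returns the 2-bit chunk tag l and the 6-bit value m from the
// first byte of a chunk, as described in the format comment.
func splitTag(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}

// maskedCRC is the checksum from section 3 of the framing format: a
// Castagnoli CRC-32, rotated right by 15 bits, plus a constant.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	// 0x0d is a tagCopy1 byte: l == 1, and length = 4 + the low 3 bits of m.
	l, m := splitTag(0x0d)
	fmt.Println(l, m, 4+int(m&0x7)) // 1 3 7

	fmt.Printf("%08x\n", maskedCRC([]byte("sNaPpY")))
}
```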
89
vendor/github.com/hashicorp/errwrap/README.md
generated
vendored
@@ -1,89 +0,0 @@
# errwrap

`errwrap` is a package for Go that formalizes the pattern of wrapping errors
and checking if an error contains another error.

There is a common pattern in Go of taking a returned `error` value and
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
with this pattern is that you completely lose the original `error` structure.

Arguably the _correct_ approach is that you should make a custom structure
implementing the `error` interface, and have the original error as a field
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
This is a good approach, but you have to know the entire chain of possible
rewrapping that happens, when you might just care about one.

`errwrap` formalizes this pattern (it doesn't matter what approach you use
above) by giving a single interface for wrapping errors, checking if a specific
error is wrapped, and extracting that error.

## Installation and Docs

Install using `go get github.com/hashicorp/errwrap`.

Full documentation is available at
http://godoc.org/github.com/hashicorp/errwrap

## Usage

#### Basic Usage

Below is a very basic example of its usage:

```go
// A function that always returns an error, but wraps it, like a real
// function might.
func tryOpen() error {
	_, err := os.Open("/i/dont/exist")
	if err != nil {
		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
	}

	return nil
}

func main() {
	err := tryOpen()

	// We can use the Contains helpers to check if an error contains
	// another error. It is safe to do this with a nil error, or with
	// an error that doesn't even use the errwrap package.
	if errwrap.Contains(err, ErrNotExist) {
		// Do something
	}
	if errwrap.ContainsType(err, new(os.PathError)) {
		// Do something
	}

	// Or we can use the associated `Get` functions to just extract
	// a specific error. This would return nil if that specific error doesn't
	// exist.
	perr := errwrap.GetType(err, new(os.PathError))
}
```

#### Custom Types

If you're already making custom types that properly wrap errors, then
you can get all the functionality of `errwrap.Contains` and such by
implementing the `Wrapper` interface with just one function. Example:

```go
type AppError struct {
	Code ErrorCode
	Err  error
}

func (e *AppError) WrappedErrors() []error {
	return []error{e.Err}
}
```

Now this works:

```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
	// This will work!
}
```
30 vendor/github.com/hashicorp/go-cleanhttp/README.md generated vendored
@@ -1,30 +0,0 @@
# cleanhttp

Functions for accessing "clean" Go http.Client values

-------------

The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:

> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.

Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).

Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not simply enough to replace
`http.DefaultClient` with `&http.Client{}`.

This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.
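The README above describes the problem but stops short of showing the calls in use; a hedged sketch based only on the `DefaultClient` and `DefaultPooledClient` functions visible in `cleanhttp.go` below (the request URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// A throwaway client: keep-alives disabled, no shared global state.
	c := cleanhttp.DefaultClient()

	// A pooled client, intended to be reused against the same host(s).
	pooled := cleanhttp.DefaultPooledClient()
	_ = pooled

	resp, err := c.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```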
24 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go generated vendored
@@ -3,11 +3,12 @@ package cleanhttp
 import (
 	"net"
 	"net/http"
+	"runtime"
 	"time"
 )
 
-// DefaultTransport returns a new http.Transport with the same default values
-// as http.DefaultTransport, but with idle connections and keepalives disabled.
+// DefaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport, but with idle connections and keepalives disabled.
 func DefaultTransport() *http.Transport {
 	transport := DefaultPooledTransport()
 	transport.DisableKeepAlives = true
@@ -22,13 +23,16 @@ func DefaultTransport() *http.Transport {
 func DefaultPooledTransport() *http.Transport {
 	transport := &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
-		Dial: (&net.Dialer{
+		DialContext: (&net.Dialer{
 			Timeout:   30 * time.Second,
 			KeepAlive: 30 * time.Second,
-		}).Dial,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
 		TLSHandshakeTimeout:   10 * time.Second,
-		DisableKeepAlives:     false,
-		MaxIdleConnsPerHost:   1,
+		ExpectContinueTimeout: 1 * time.Second,
+		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
 	}
 	return transport
 }
@@ -42,10 +46,10 @@ func DefaultClient() *http.Client {
 	}
 }
 
-// DefaultPooledClient returns a new http.Client with the same default values
-// as http.Client, but with a shared Transport. Do not use this function
-// for transient clients as it can leak file descriptors over time. Only use
-// this for clients that will be re-used for the same host(s).
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared Transport. Do not use this function for
+// transient clients as it can leak file descriptors over time. Only use this
+// for clients that will be re-used for the same host(s).
 func DefaultPooledClient() *http.Client {
 	return &http.Client{
 		Transport: DefaultPooledTransport(),
43 vendor/github.com/hashicorp/go-cleanhttp/handlers.go generated vendored Normal file
@@ -0,0 +1,43 @@
package cleanhttp

import (
	"net/http"
	"strings"
	"unicode"
)

// HandlerInput provides input options to cleanhttp's handlers
type HandlerInput struct {
	ErrStatus int
}

// PrintablePathCheckHandler is a middleware that ensures the request path
// contains only printable runes.
func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
	// Nil-check on input to make it optional
	if input == nil {
		input = &HandlerInput{
			ErrStatus: http.StatusBadRequest,
		}
	}

	// Default to http.StatusBadRequest on error
	if input.ErrStatus == 0 {
		input.ErrStatus = http.StatusBadRequest
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Check URL path for non-printable characters
		idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
			return !unicode.IsPrint(c)
		})

		if idx != -1 {
			w.WriteHeader(input.ErrStatus)
			return
		}

		next.ServeHTTP(w, r)
		return
	})
}
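PrintablePathCheckHandler is plain net/http middleware; a hedged sketch of wiring it into a server follows (the mux, route, and listen address are placeholders, not part of the vendored code):

```go
package main

import (
	"fmt"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	// Reject any request whose URL path contains non-printable runes with
	// 400 Bad Request (passing nil keeps the defaults shown above).
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)

	http.ListenAndServe(":8080", handler) // placeholder address
}
```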
91 vendor/github.com/hashicorp/go-multierror/README.md generated vendored
@@ -1,91 +0,0 @@
# go-multierror

`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.

This allows a function in Go to return an `error` that might actually
be a list of errors. If the caller knows this, they can unwrap the
list and access the errors. If the caller doesn't know, the error
formats to a nice human-readable format.

`go-multierror` implements the
[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
be used with that library, as well.

## Installation and Docs

Install using `go get github.com/hashicorp/go-multierror`.

Full documentation is available at
http://godoc.org/github.com/hashicorp/go-multierror

## Usage

go-multierror is easy to use and purposely built to be unobtrusive in
existing Go applications/libraries that may not be aware of it.

**Building a list of errors**

The `Append` function is used to create a list of errors. This function
behaves a lot like the Go built-in `append` function: it doesn't matter
if the first argument is nil, a `multierror.Error`, or any other `error`,
the function behaves as you would expect.

```go
var result error

if err := step1(); err != nil {
	result = multierror.Append(result, err)
}
if err := step2(); err != nil {
	result = multierror.Append(result, err)
}

return result
```

**Customizing the formatting of the errors**

By specifying a custom `ErrorFormat`, you can customize the format
of the `Error() string` function:

```go
var result *multierror.Error

// ... accumulate errors here, maybe using Append

if result != nil {
	result.ErrorFormat = func([]error) string {
		return "errors!"
	}
}
```

**Accessing the list of errors**

`multierror.Error` implements `error` so if the caller doesn't know about
multierror, it will work just fine. But if you're aware a multierror might
be returned, you can use type switches to access the list of errors:

```go
if err := something(); err != nil {
	if merr, ok := err.(*multierror.Error); ok {
		// Use merr.Errors
	}
}
```

**Returning a multierror only if there are errors**

If you build a `multierror.Error`, you can use the `ErrorOrNil` function
to return an `error` implementation only if there are errors to return:

```go
var result *multierror.Error

// ... accumulate errors here

// Return the `error` only if errors were added to the multierror, otherwise
// return nil since there are no errors.
return result.ErrorOrNil()
```
4 vendor/github.com/hashicorp/go-multierror/append.go generated vendored
@@ -18,11 +18,15 @@ func Append(err error, errs ...error) *Error {
 		for _, e := range errs {
 			switch e := e.(type) {
 			case *Error:
+				if e != nil {
 					err.Errors = append(err.Errors, e.Errors...)
+				}
 			default:
+				if e != nil {
 					err.Errors = append(err.Errors, e)
+				}
 			}
 		}
 
 		return err
 	default:
8 vendor/github.com/hashicorp/go-multierror/format.go generated vendored
@@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string
 // ListFormatFunc is a basic formatter that outputs the number of errors
 // that occurred along with a bullet point list of the errors.
 func ListFormatFunc(es []error) string {
+	if len(es) == 1 {
+		return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+	}
+
 	points := make([]string, len(es))
 	for i, err := range es {
 		points[i] = fmt.Sprintf("* %s", err)
 	}
 
 	return fmt.Sprintf(
-		"%d error(s) occurred:\n\n%s",
-		len(es), strings.Join(points, "\n"))
+		"%d errors occurred:\n\t%s\n\n",
+		len(es), strings.Join(points, "\n\t"))
 }
4 vendor/github.com/hashicorp/go-multierror/multierror.go generated vendored
@@ -40,11 +40,11 @@ func (e *Error) GoString() string {
 }
 
 // WrappedErrors returns the list of errors that this Error is wrapping.
-// It is an implementatin of the errwrap.Wrapper interface so that
+// It is an implementation of the errwrap.Wrapper interface so that
 // multierror.Error can be used with that library.
 //
 // This method is not safe to be called concurrently and is no different
-// than accessing the Errors field directly. It is implementd only to
+// than accessing the Errors field directly. It is implemented only to
 // satisfy the errwrap.Wrapper interface.
 func (e *Error) WrappedErrors() []error {
 	return e.Errors
16 vendor/github.com/hashicorp/go-multierror/sort.go generated vendored Normal file
@@ -0,0 +1,16 @@
package multierror

// Len implements sort.Interface function for length
func (err Error) Len() int {
	return len(err.Errors)
}

// Swap implements sort.Interface function for swapping elements
func (err Error) Swap(i, j int) {
	err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
}

// Less implements sort.Interface function for determining order
func (err Error) Less(i, j int) bool {
	return err.Errors[i].Error() < err.Errors[j].Error()
}
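Since Error now satisfies sort.Interface, a hedged sketch of sorting accumulated errors by message (the error values themselves are made-up placeholders):

```go
package main

import (
	"errors"
	"fmt"
	"sort"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error
	// Accumulate a few errors; the messages are placeholders.
	result = multierror.Append(result, errors.New("zeta failed"))
	result = multierror.Append(result, errors.New("alpha failed"))

	// *Error picks up the value-receiver Len/Swap/Less methods above,
	// so the accumulated list can be handed straight to sort.Sort.
	sort.Sort(result)
	fmt.Println(result.Error())
}
```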
363 vendor/github.com/hashicorp/go-retryablehttp/LICENSE generated vendored Normal file
|
@ -0,0 +1,363 @@
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. "Contributor"
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. "Contributor Version"
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor's Contribution.
|
||||||
|
|
||||||
|
1.3. "Contribution"
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. "Covered Software"
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. "Incompatible With Secondary Licenses"
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of
|
||||||
|
version 1.1 or earlier of the License, but not also under the terms of
|
||||||
|
a Secondary License.
|
||||||
|
|
||||||
|
1.6. "Executable Form"
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. "Larger Work"
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a
|
||||||
|
separate file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. "License"
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. "Licensable"
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether
|
||||||
|
at the time of the initial grant or subsequently, any and all of the
|
||||||
|
rights conveyed by this License.
|
||||||
|
|
||||||
|
1.10. "Modifications"
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to,
|
||||||
|
deletion from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. "Patent Claims" of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method,
|
||||||
|
process, and apparatus claims, in any patent Licensable by such
|
||||||
|
Contributor that would be infringed, but for the grant of the License,
|
||||||
|
by the making, using, selling, offering for sale, having made, import,
|
||||||
|
or transfer of either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. "Secondary License"
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. "Source Code Form"
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. "You" (or "Your")
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, "You" includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, "control" means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or
|
||||||
|
as part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its
|
||||||
|
Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution
|
||||||
|
become effective for each Contribution on the date the Contributor first
|
||||||
|
distributes such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under
|
||||||
|
this License. No additional rights or licenses will be implied from the
|
||||||
|
distribution or licensing of Covered Software under this License.
|
||||||
|
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||||
|
Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party's
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of
|
||||||
|
its Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks,
|
||||||
|
or logos of any Contributor (except as may be necessary to comply with
|
||||||
|
the notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this
|
||||||
|
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||||
|
permitted under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its
|
||||||
|
Contributions are its original creation(s) or it has sufficient rights to
|
||||||
|
grant the rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under
|
||||||
|
applicable copyright doctrines of fair use, fair dealing, or other
|
||||||
|
equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under
|
||||||
|
the terms of this License. You must inform recipients that the Source
|
||||||
|
Code Form of the Covered Software is governed by the terms of this
|
||||||
|
License, and how they can obtain a copy of this License. You may not
|
||||||
|
attempt to alter or restrict the recipients' rights in the Source Code
|
||||||
|
Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this
|
||||||
|
License, or sublicense it under different terms, provided that the
|
||||||
|
license for the Executable Form does not attempt to limit or alter the
|
||||||
|
recipients' rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for
|
||||||
|
the Covered Software. If the Larger Work is a combination of Covered
|
||||||
|
Software with a work governed by one or more Secondary Licenses, and the
|
||||||
|
Covered Software is not Incompatible With Secondary Licenses, this
|
||||||
|
License permits You to additionally distribute such Covered Software
|
||||||
|
under the terms of such Secondary License(s), so that the recipient of
|
||||||
|
the Larger Work may, at their option, further distribute the Covered
|
||||||
|
Software under the terms of either this License or such Secondary
|
||||||
|
License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices
|
||||||
|
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||||
|
limitations of liability) contained within the Source Code Form of the
|
||||||
|
Covered Software, except that You may alter any license notices to the
|
||||||
|
extent required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on
|
||||||
|
behalf of any Contributor. You must make it absolutely clear that any
|
||||||
|
such warranty, support, indemnity, or liability obligation is offered by
|
||||||
|
You alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute,
|
||||||
|
judicial order, or regulation then You must: (a) comply with the terms of
|
||||||
|
this License to the maximum extent possible; and (b) describe the
|
||||||
|
limitations and the code they affect. Such description must be placed in a
|
||||||
|
text file included with all distributions of the Covered Software under
|
||||||
|
this License. Except to the extent prohibited by statute or regulation,
|
||||||
|
such description must be sufficiently detailed for a recipient of ordinary
|
||||||
|
skill to be able to understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||||
|
basis, if such Contributor fails to notify You of the non-compliance by
|
||||||
|
some reasonable means prior to 60 days after You have come back into
|
||||||
|
compliance. Moreover, Your grants from a particular Contributor are
|
||||||
|
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||||
|
non-compliance by some reasonable means, this is the first time You have
|
||||||
|
received notice of non-compliance with this License from such
|
||||||
|
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||||
|
of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions,
|
||||||
|
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||||
|
directly or indirectly infringes any patent, then the rights granted to
|
||||||
|
You by any and all Contributors for the Covered Software under Section
|
||||||
|
2.1 of this License shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an "as is" basis,
|
||||||
|
without warranty of any kind, either expressed, implied, or statutory,
|
||||||
|
including, without limitation, warranties that the Covered Software is free
|
||||||
|
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||||
|
The entire risk as to the quality and performance of the Covered Software
|
||||||
|
is with You. Should any Covered Software prove defective in any respect,
|
||||||
|
You (not any Contributor) assume the cost of any necessary servicing,
|
||||||
|
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||||
|
part of this License. No use of any Covered Software is authorized under
|
||||||
|
this License except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from
|
||||||
|
such party's negligence to the extent applicable law prohibits such
|
||||||
|
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||||
|
incidental or consequential damages, so this exclusion and limitation may
|
||||||
|
not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts
|
||||||
|
of a jurisdiction where the defendant maintains its principal place of
|
||||||
|
business and such litigation shall be governed by laws of that
|
||||||
|
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||||
|
in this Section shall prevent a party's ability to bring cross-claims or
|
||||||
|
counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides that
|
||||||
|
the language of a contract shall be construed against the drafter shall not
|
||||||
|
be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses If You choose to distribute Source Code Form that is
|
||||||
|
Incompatible With Secondary Licenses under the terms of this version of
|
||||||
|
the License, the notice described in Exhibit B of this License must be
|
||||||
|
attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file,
|
||||||
|
then You may include the notice in a location (such as a LICENSE file in a
|
||||||
|
relevant directory) where a recipient would be likely to look for such a
|
||||||
|
notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible
|
||||||
|
With Secondary Licenses", as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
||||||
|
|
500 vendor/github.com/hashicorp/go-retryablehttp/client.go generated vendored Normal file
|
@ -0,0 +1,500 @@
|
||||||
|
// The retryablehttp package provides a familiar HTTP client interface with
|
||||||
|
// automatic retries and exponential backoff. It is a thin wrapper over the
|
||||||
|
// standard net/http client library and exposes nearly the same public API.
|
||||||
|
// This makes retryablehttp very easy to drop into existing programs.
|
||||||
|
//
|
||||||
|
// retryablehttp performs automatic retries under certain conditions. Mainly, if
|
||||||
|
// an error is returned by the client (connection errors etc), or if a 500-range
|
||||||
|
// response is received, then a retry is invoked. Otherwise, the response is
|
||||||
|
// returned and left to the caller to interpret.
|
||||||
|
//
|
||||||
|
// Requests which take a request body should provide a non-nil function
|
||||||
|
// parameter. The best choice is to provide either a function satisfying
|
||||||
|
// ReaderFunc which provides multiple io.Readers in an efficient manner, a
|
||||||
|
// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
|
||||||
|
// slice. As it is a reference type, and we will wrap it as needed by readers,
|
||||||
|
// we can efficiently re-use the request body without needing to copy it. If an
|
||||||
|
// io.Reader (such as a *bytes.Reader) is provided, the full body will be read
|
||||||
|
// prior to the first request, and will be efficiently re-used for any retries.
|
||||||
|
// ReadSeeker can be used, but some users have observed occasional data races
|
||||||
|
// between the net/http library and the Seek functionality of some
|
||||||
|
// implementations of ReadSeeker, so should be avoided if possible.
|
||||||
|
package retryablehttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Default retry configuration
|
||||||
|
defaultRetryWaitMin = 1 * time.Second
|
||||||
|
defaultRetryWaitMax = 30 * time.Second
|
||||||
|
defaultRetryMax = 4
|
||||||
|
|
||||||
|
// defaultClient is used for performing requests without explicitly making
|
||||||
|
// a new client. It is purposely private to avoid modifications.
|
||||||
|
defaultClient = NewClient()
|
||||||
|
|
||||||
|
// We need to consume response bodies to maintain http connections, but
|
||||||
|
// limit the size we consume to respReadLimit.
|
||||||
|
respReadLimit = int64(4096)
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReaderFunc is the type of function that can be given natively to NewRequest
|
||||||
|
type ReaderFunc func() (io.Reader, error)
|
||||||
|
|
||||||
|
// LenReader is an interface implemented by many in-memory io.Reader's. Used
|
||||||
|
// for automatically sending the right Content-Length header when possible.
|
||||||
|
type LenReader interface {
|
||||||
|
Len() int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request wraps the metadata needed to create HTTP requests.
|
||||||
|
type Request struct {
|
||||||
|
// body is a seekable reader over the request body payload. This is
|
||||||
|
// used to rewind the request data in between retries.
|
||||||
|
body ReaderFunc
|
||||||
|
|
||||||
|
// Embed an HTTP request directly. This makes a *Request act exactly
|
||||||
|
// like an *http.Request so that all meta methods are supported.
|
||||||
|
*http.Request
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext returns wrapped Request with a shallow copy of underlying *http.Request
|
||||||
|
// with its context changed to ctx. The provided ctx must be non-nil.
|
||||||
|
func (r *Request) WithContext(ctx context.Context) *Request {
|
||||||
|
r.Request = r.Request.WithContext(ctx)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequest creates a new wrapped request.
|
||||||
|
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
|
||||||
|
var err error
|
||||||
|
var body ReaderFunc
|
||||||
|
var contentLength int64
|
||||||
|
|
||||||
|
if rawBody != nil {
|
||||||
|
switch rawBody.(type) {
|
||||||
|
// If they gave us a function already, great! Use it.
|
||||||
|
case ReaderFunc:
|
||||||
|
body = rawBody.(ReaderFunc)
|
||||||
|
tmp, err := body()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if lr, ok := tmp.(LenReader); ok {
|
||||||
|
contentLength = int64(lr.Len())
|
||||||
|
}
|
||||||
|
if c, ok := tmp.(io.Closer); ok {
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
case func() (io.Reader, error):
|
||||||
|
body = rawBody.(func() (io.Reader, error))
|
||||||
|
tmp, err := body()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if lr, ok := tmp.(LenReader); ok {
|
||||||
|
contentLength = int64(lr.Len())
|
||||||
|
}
|
||||||
|
if c, ok := tmp.(io.Closer); ok {
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a regular byte slice, we can read it over and over via new
|
||||||
|
// readers
|
||||||
|
case []byte:
|
||||||
|
buf := rawBody.([]byte)
|
||||||
|
body = func() (io.Reader, error) {
|
||||||
|
return bytes.NewReader(buf), nil
|
||||||
|
}
|
||||||
|
contentLength = int64(len(buf))
|
||||||
|
|
||||||
|
// If a bytes.Buffer we can read the underlying byte slice over and
|
||||||
|
// over
|
||||||
|
case *bytes.Buffer:
|
||||||
|
buf := rawBody.(*bytes.Buffer)
|
||||||
|
body = func() (io.Reader, error) {
|
||||||
|
return bytes.NewReader(buf.Bytes()), nil
|
||||||
|
}
|
||||||
|
contentLength = int64(buf.Len())
|
||||||
|
|
||||||
|
// We prioritize *bytes.Reader here because we don't really want to
|
||||||
|
// deal with it seeking so want it to match here instead of the
|
||||||
|
// io.ReadSeeker case.
|
||||||
|
case *bytes.Reader:
|
||||||
|
buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
body = func() (io.Reader, error) {
|
||||||
|
return bytes.NewReader(buf), nil
|
||||||
|
}
|
||||||
|
contentLength = int64(len(buf))
|
||||||
|
|
||||||
|
// Compat case
|
||||||
|
case io.ReadSeeker:
|
||||||
|
raw := rawBody.(io.ReadSeeker)
|
||||||
|
body = func() (io.Reader, error) {
|
||||||
|
raw.Seek(0, 0)
|
||||||
|
return ioutil.NopCloser(raw), nil
|
||||||
|
}
|
||||||
|
if lr, ok := raw.(LenReader); ok {
|
||||||
|
contentLength = int64(lr.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read all in so we can reset
|
||||||
|
case io.Reader:
|
||||||
|
buf, err := ioutil.ReadAll(rawBody.(io.Reader))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
body = func() (io.Reader, error) {
|
||||||
|
return bytes.NewReader(buf), nil
|
||||||
|
}
|
||||||
|
contentLength = int64(len(buf))
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("cannot handle type %T", rawBody)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
httpReq, err := http.NewRequest(method, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
httpReq.ContentLength = contentLength
|
||||||
|
|
||||||
|
return &Request{body, httpReq}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestLogHook allows a function to run before each retry. The HTTP
|
||||||
|
// request which will be made, and the retry number (0 for the initial
|
||||||
|
// request) are available to users. The internal logger is exposed to
|
||||||
|
// consumers.
|
||||||
|
type RequestLogHook func(*log.Logger, *http.Request, int)
|
||||||
|
|
||||||
|
// ResponseLogHook is like RequestLogHook, but allows running a function
|
||||||
|
// on each HTTP response. This function will be invoked at the end of
|
||||||
|
// every HTTP request executed, regardless of whether a subsequent retry
|
||||||
|
// needs to be performed or not. If the response body is read or closed
|
||||||
|
// from this method, this will affect the response returned from Do().
|
||||||
|
type ResponseLogHook func(*log.Logger, *http.Response)
|
||||||
|
|
||||||
|
// CheckRetry specifies a policy for handling retries. It is called
|
||||||
|
// following each request with the response and error values returned by
|
||||||
|
// the http.Client. If CheckRetry returns false, the Client stops retrying
|
||||||
|
// and returns the response to the caller. If CheckRetry returns an error,
|
||||||
|
// that error value is returned in lieu of the error from the request. The
|
||||||
|
// Client will close any response body when retrying, but if the retry is
|
||||||
|
// aborted it is up to the CheckResponse callback to properly close any
|
||||||
|
// response body before returning.
|
||||||
|
type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
|
||||||
|
|
||||||
|
// Backoff specifies a policy for how long to wait between retries.
|
||||||
|
// It is called after a failing request to determine the amount of time
|
||||||
|
// that should pass before trying again.
|
||||||
|
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
|
||||||
|
|
||||||
|
// ErrorHandler is called if retries are expired, containing the last status
|
||||||
|
// from the http library. If not specified, default behavior for the library is
|
||||||
|
// to close the body and return an error indicating how many tries were
|
||||||
|
// attempted. If overriding this, be sure to close the body if needed.
|
||||||
|
type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
|
||||||
|
|
||||||
|
// Client is used to make HTTP requests. It adds additional functionality
|
||||||
|
// like automatic retries to tolerate minor outages.
|
||||||
|
type Client struct {
|
||||||
|
HTTPClient *http.Client // Internal HTTP client.
|
||||||
|
Logger *log.Logger // Customer logger instance.
|
||||||
|
|
||||||
|
RetryWaitMin time.Duration // Minimum time to wait
|
||||||
|
RetryWaitMax time.Duration // Maximum time to wait
|
||||||
|
RetryMax int // Maximum number of retries
|
||||||
|
|
||||||
|
// RequestLogHook allows a user-supplied function to be called
|
||||||
|
// before each retry.
|
||||||
|
RequestLogHook RequestLogHook
|
||||||
|
|
||||||
|
// ResponseLogHook allows a user-supplied function to be called
|
||||||
|
// with the response from each HTTP request executed.
|
||||||
|
ResponseLogHook ResponseLogHook
|
||||||
|
|
||||||
|
// CheckRetry specifies the policy for handling retries, and is called
|
||||||
|
// after each request. The default policy is DefaultRetryPolicy.
|
||||||
|
CheckRetry CheckRetry
|
||||||
|
|
||||||
|
// Backoff specifies the policy for how long to wait between retries
|
||||||
|
Backoff Backoff
|
||||||
|
|
||||||
|
// ErrorHandler specifies the custom error handler to use, if any
|
||||||
|
ErrorHandler ErrorHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient creates a new Client with default settings.
|
||||||
|
func NewClient() *Client {
|
||||||
|
return &Client{
|
||||||
|
HTTPClient: cleanhttp.DefaultClient(),
|
||||||
|
Logger: log.New(os.Stderr, "", log.LstdFlags),
|
||||||
|
RetryWaitMin: defaultRetryWaitMin,
|
||||||
|
RetryWaitMax: defaultRetryWaitMax,
|
||||||
|
RetryMax: defaultRetryMax,
|
||||||
|
CheckRetry: DefaultRetryPolicy,
|
||||||
|
Backoff: DefaultBackoff,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
|
||||||
|
// will retry on connection errors and server errors.
|
||||||
|
func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
// do not retry on context.Canceled or context.DeadlineExceeded
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return false, ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
// Check the response code. We retry on 500-range responses to allow
|
||||||
|
// the server time to recover, as 500's are typically not permanent
|
||||||
|
// errors and may relate to outages on the server side. This will catch
|
||||||
|
// invalid response codes as well, like 0 and 999.
|
||||||
|
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultBackoff provides a default callback for Client.Backoff which
|
||||||
|
// will perform exponential backoff based on the attempt number and limited
|
||||||
|
// by the provided minimum and maximum durations.
|
||||||
|
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
|
||||||
|
mult := math.Pow(2, float64(attemptNum)) * float64(min)
|
||||||
|
sleep := time.Duration(mult)
|
||||||
|
if float64(sleep) != mult || sleep > max {
|
||||||
|
sleep = max
|
||||||
|
}
|
||||||
|
return sleep
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinearJitterBackoff provides a callback for Client.Backoff which will
|
||||||
|
// perform linear backoff based on the attempt number and with jitter to
|
||||||
|
// prevent a thundering herd.
|
||||||
|
//
|
||||||
|
// min and max here are *not* absolute values. The number to be multipled by
|
||||||
|
// the attempt number will be chosen at random from between them, thus they are
|
||||||
|
// bounding the jitter.
|
||||||
|
//
|
||||||
|
// For instance:
|
||||||
|
// * To get strictly linear backoff of one second increasing each retry, set
|
||||||
|
// both to one second (1s, 2s, 3s, 4s, ...)
|
||||||
|
// * To get a small amount of jitter centered around one second increasing each
|
||||||
|
// retry, set to around one second, such as a min of 800ms and max of 1200ms
|
||||||
|
// (892ms, 2102ms, 2945ms, 4312ms, ...)
|
||||||
|
// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
|
||||||
|
// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
|
||||||
|
func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
|
||||||
|
// attemptNum always starts at zero but we want to start at 1 for multiplication
|
||||||
|
attemptNum++
|
||||||
|
|
||||||
|
if max <= min {
|
||||||
|
// Unclear what to do here, or they are the same, so return min *
|
||||||
|
// attemptNum
|
||||||
|
return min * time.Duration(attemptNum)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seed rand; doing this every time is fine
|
||||||
|
rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
|
||||||
|
|
||||||
|
// Pick a random number that lies somewhere between the min and max and
|
||||||
|
// multiply by the attemptNum. attemptNum starts at zero so we always
|
||||||
|
// increment here. We first get a random percentage, then apply that to the
|
||||||
|
// difference between min and max, and add to min.
|
||||||
|
jitter := rand.Float64() * float64(max-min)
|
||||||
|
jitterMin := int64(jitter) + int64(min)
|
||||||
|
return time.Duration(jitterMin * int64(attemptNum))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PassthroughErrorHandler is an ErrorHandler that directly passes through the
|
||||||
|
// values from the net/http library for the final request. The body is not
|
||||||
|
// closed.
|
||||||
|
func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do wraps calling an HTTP method with retries.
|
||||||
|
func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||||
|
if c.Logger != nil {
|
||||||
|
c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp *http.Response
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
var code int // HTTP response code
|
||||||
|
|
||||||
|
// Always rewind the request body when non-nil.
|
||||||
|
if req.body != nil {
|
||||||
|
body, err := req.body()
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
if c, ok := body.(io.ReadCloser); ok {
|
||||||
|
req.Request.Body = c
|
||||||
|
} else {
|
||||||
|
req.Request.Body = ioutil.NopCloser(body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.RequestLogHook != nil {
|
||||||
|
c.RequestLogHook(c.Logger, req.Request, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt the request
|
||||||
|
resp, err = c.HTTPClient.Do(req.Request)
|
||||||
|
if resp != nil {
|
||||||
|
code = resp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we should continue with retries.
|
||||||
|
checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if c.Logger != nil {
|
||||||
|
c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Call this here to maintain the behavior of logging all requests,
|
||||||
|
// even if CheckRetry signals to stop.
|
||||||
|
if c.ResponseLogHook != nil {
|
||||||
|
// Call the response logger function if provided.
|
||||||
|
c.ResponseLogHook(c.Logger, resp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now decide if we should continue.
|
||||||
|
if !checkOK {
|
||||||
|
if checkErr != nil {
|
||||||
|
err = checkErr
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We do this before drainBody beause there's no need for the I/O if
|
||||||
|
// we're breaking out
|
||||||
|
remain := c.RetryMax - i
|
||||||
|
if remain <= 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're going to retry, consume any response to reuse the connection.
|
||||||
|
if err == nil && resp != nil {
|
||||||
|
c.drainBody(resp.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
|
||||||
|
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
|
||||||
|
if code > 0 {
|
||||||
|
desc = fmt.Sprintf("%s (status: %d)", desc, code)
|
||||||
|
}
|
||||||
|
if c.Logger != nil {
|
||||||
|
c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
|
||||||
|
}
|
||||||
|
time.Sleep(wait)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.ErrorHandler != nil {
|
||||||
|
return c.ErrorHandler(resp, err, c.RetryMax+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// By default, we close the response body and return an error without
|
||||||
|
// returning the response
|
||||||
|
if resp != nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("%s %s giving up after %d attempts",
|
||||||
|
req.Method, req.URL, c.RetryMax+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to read the response body so we can reuse this connection.
|
||||||
|
func (c *Client) drainBody(body io.ReadCloser) {
|
||||||
|
defer body.Close()
|
||||||
|
_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
|
||||||
|
if err != nil {
|
||||||
|
if c.Logger != nil {
|
||||||
|
c.Logger.Printf("[ERR] error reading response body: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get is a shortcut for doing a GET request without making a new client.
|
||||||
|
func Get(url string) (*http.Response, error) {
|
||||||
|
return defaultClient.Get(url)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get is a convenience helper for doing simple GET requests.
|
||||||
|
func (c *Client) Get(url string) (*http.Response, error) {
|
||||||
|
req, err := NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Head is a shortcut for doing a HEAD request without making a new client.
|
||||||
|
func Head(url string) (*http.Response, error) {
|
||||||
|
return defaultClient.Head(url)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Head is a convenience method for doing simple HEAD requests.
|
||||||
|
func (c *Client) Head(url string) (*http.Response, error) {
|
||||||
|
req, err := NewRequest("HEAD", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Post is a shortcut for doing a POST request without making a new client.
|
||||||
|
func Post(url, bodyType string, body interface{}) (*http.Response, error) {
|
||||||
|
return defaultClient.Post(url, bodyType, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Post is a convenience method for doing simple POST requests.
|
||||||
|
func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) {
|
||||||
|
req, err := NewRequest("POST", url, body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", bodyType)
|
||||||
|
return c.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostForm is a shortcut to perform a POST with form data without creating
|
||||||
|
// a new client.
|
||||||
|
func PostForm(url string, data url.Values) (*http.Response, error) {
|
||||||
|
return defaultClient.PostForm(url, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostForm is a convenience method for doing simple POST operations using
|
||||||
|
// pre-filled url.Values form data.
|
||||||
|
func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) {
|
||||||
|
return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
||||||
|
}
|
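To make the vendored client's surface concrete, a hedged usage sketch based only on the exported names visible in client.go above (NewClient, the retry fields, and Get); the URL and retry values are placeholders:

```go
package main

import (
	"fmt"
	"time"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// Build a client with the defaults from NewClient, then tighten the
	// retry behaviour a little.
	c := retryablehttp.NewClient()
	c.RetryMax = 2
	c.RetryWaitMin = 500 * time.Millisecond
	c.RetryWaitMax = 5 * time.Second

	// Get retries on connection errors and 500-range responses, per
	// DefaultRetryPolicy above.
	resp, err := c.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```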
12 vendor/github.com/hashicorp/go-rootcerts/.travis.yml generated vendored
@@ -1,12 +0,0 @@
sudo: false

language: go

go:
  - 1.6

branches:
  only:
    - master

script: make test
8 vendor/github.com/hashicorp/go-rootcerts/Makefile generated vendored
@@ -1,8 +0,0 @@
TEST?=./...

test:
	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
	go vet $(TEST)
	go test $(TEST) -race

.PHONY: test
43	vendor/github.com/hashicorp/go-rootcerts/README.md (generated, vendored)
@@ -1,43 +0,0 @@
# rootcerts

Functions for loading root certificates for TLS connections.

-----

Go's standard library `crypto/tls` provides a common mechanism for configuring
TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
of certificates for the client to use as a trust store when verifying server
certificates.

This library contains utility functions for loading certificates destined for
that field, as well as one other important thing:

When the `RootCAs` field is `nil`, the standard library attempts to load the
host's root CA set. This behavior is OS-specific, and the Darwin
implementation contains [a bug that prevents trusted certificates from the
System and Login keychains from being loaded][1]. This library contains
Darwin-specific behavior that works around that bug.

[1]: https://github.com/golang/go/issues/14514

## Example Usage

Here's a snippet demonstrating how this library is meant to be used:

```go
func httpClient() (*http.Client, error) {
	tlsConfig := &tls.Config{}
	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: os.Getenv("MYAPP_CAFILE"),
		CAPath: os.Getenv("MYAPP_CAPATH"),
	})
	if err != nil {
		return nil, err
	}
	c := cleanhttp.DefaultClient()
	t := cleanhttp.DefaultTransport()
	t.TLSClientConfig = tlsConfig
	c.Transport = t
	return c, nil
}
```
1	vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem (generated, vendored, symbolic link)
@@ -0,0 +1 @@
../capath/securetrust.pem
1	vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem (generated, vendored, symbolic link)
@@ -0,0 +1 @@
../capath/thawte.pem
373	vendor/github.com/hashicorp/go-sockaddr/LICENSE (generated, vendored, new file)
@@ -0,0 +1,373 @@
|
||||||
|
Mozilla Public License Version 2.0
|
||||||
|
==================================
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
--------------
|
||||||
|
|
||||||
|
1.1. "Contributor"
|
||||||
|
means each individual or legal entity that creates, contributes to
|
||||||
|
the creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. "Contributor Version"
|
||||||
|
means the combination of the Contributions of others (if any) used
|
||||||
|
by a Contributor and that particular Contributor's Contribution.
|
||||||
|
|
||||||
|
1.3. "Contribution"
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. "Covered Software"
|
||||||
|
means Source Code Form to which the initial Contributor has attached
|
||||||
|
the notice in Exhibit A, the Executable Form of such Source Code
|
||||||
|
Form, and Modifications of such Source Code Form, in each case
|
||||||
|
including portions thereof.
|
||||||
|
|
||||||
|
1.5. "Incompatible With Secondary Licenses"
|
||||||
|
means
|
||||||
|
|
||||||
|
(a) that the initial Contributor has attached the notice described
|
||||||
|
in Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
(b) that the Covered Software was made available under the terms of
|
||||||
|
version 1.1 or earlier of the License, but not also under the
|
||||||
|
terms of a Secondary License.
|
||||||
|
|
||||||
|
1.6. "Executable Form"
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. "Larger Work"
|
||||||
|
means a work that combines Covered Software with other material, in
|
||||||
|
a separate file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. "License"
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. "Licensable"
|
||||||
|
means having the right to grant, to the maximum extent possible,
|
||||||
|
whether at the time of the initial grant or subsequently, any and
|
||||||
|
all of the rights conveyed by this License.
|
||||||
|
|
||||||
|
1.10. "Modifications"
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
(a) any file in Source Code Form that results from an addition to,
|
||||||
|
deletion from, or modification of the contents of Covered
|
||||||
|
Software; or
|
||||||
|
|
||||||
|
(b) any new file in Source Code Form that contains any Covered
|
||||||
|
Software.
|
||||||
|
|
||||||
|
1.11. "Patent Claims" of a Contributor
|
||||||
|
means any patent claim(s), including without limitation, method,
|
||||||
|
process, and apparatus claims, in any patent Licensable by such
|
||||||
|
Contributor that would be infringed, but for the grant of the
|
||||||
|
License, by the making, using, selling, offering for sale, having
|
||||||
|
made, import, or transfer of either its Contributions or its
|
||||||
|
Contributor Version.
|
||||||
|
|
||||||
|
1.12. "Secondary License"
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU
|
||||||
|
Lesser General Public License, Version 2.1, the GNU Affero General
|
||||||
|
Public License, Version 3.0, or any later versions of those
|
||||||
|
licenses.
|
||||||
|
|
||||||
|
1.13. "Source Code Form"
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. "You" (or "Your")
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, "You" includes any entity that
|
||||||
|
controls, is controlled by, or is under common control with You. For
|
||||||
|
purposes of this definition, "control" means (a) the power, direct
|
||||||
|
or indirect, to cause the direction or management of such entity,
|
||||||
|
whether by contract or otherwise, or (b) ownership of more than
|
||||||
|
fifty percent (50%) of the outstanding shares or beneficial
|
||||||
|
ownership of such entity.
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
(a) under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or
|
||||||
|
as part of a Larger Work; and
|
||||||
|
|
||||||
|
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||||
|
for sale, have made, import, and otherwise transfer either its
|
||||||
|
Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution
|
||||||
|
become effective for each Contribution on the date the Contributor first
|
||||||
|
distributes such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under
|
||||||
|
this License. No additional rights or licenses will be implied from the
|
||||||
|
distribution or licensing of Covered Software under this License.
|
||||||
|
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||||
|
Contributor:
|
||||||
|
|
||||||
|
(a) for any code that a Contributor has removed from Covered Software;
|
||||||
|
or
|
||||||
|
|
||||||
|
(b) for infringements caused by: (i) Your and any other third party's
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||||
|
its Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks,
|
||||||
|
or logos of any Contributor (except as may be necessary to comply with
|
||||||
|
the notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this
|
||||||
|
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||||
|
permitted under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its
|
||||||
|
Contributions are its original creation(s) or it has sufficient rights
|
||||||
|
to grant the rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under
|
||||||
|
applicable copyright doctrines of fair use, fair dealing, or other
|
||||||
|
equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||||
|
in Section 2.1.
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under
|
||||||
|
the terms of this License. You must inform recipients that the Source
|
||||||
|
Code Form of the Covered Software is governed by the terms of this
|
||||||
|
License, and how they can obtain a copy of this License. You may not
|
||||||
|
attempt to alter or restrict the recipients' rights in the Source Code
|
||||||
|
Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
(a) such Covered Software must also be made available in Source Code
|
||||||
|
Form, as described in Section 3.1, and You must inform recipients of
|
||||||
|
the Executable Form how they can obtain a copy of such Source Code
|
||||||
|
Form by reasonable means in a timely manner, at a charge no more
|
||||||
|
than the cost of distribution to the recipient; and
|
||||||
|
|
||||||
|
(b) You may distribute such Executable Form under the terms of this
|
||||||
|
License, or sublicense it under different terms, provided that the
|
||||||
|
license for the Executable Form does not attempt to limit or alter
|
||||||
|
the recipients' rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for
|
||||||
|
the Covered Software. If the Larger Work is a combination of Covered
|
||||||
|
Software with a work governed by one or more Secondary Licenses, and the
|
||||||
|
Covered Software is not Incompatible With Secondary Licenses, this
|
||||||
|
License permits You to additionally distribute such Covered Software
|
||||||
|
under the terms of such Secondary License(s), so that the recipient of
|
||||||
|
the Larger Work may, at their option, further distribute the Covered
|
||||||
|
Software under the terms of either this License or such Secondary
|
||||||
|
License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices
|
||||||
|
(including copyright notices, patent notices, disclaimers of warranty,
|
||||||
|
or limitations of liability) contained within the Source Code Form of
|
||||||
|
the Covered Software, except that You may alter any license notices to
|
||||||
|
the extent required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on
|
||||||
|
behalf of any Contributor. You must make it absolutely clear that any
|
||||||
|
such warranty, support, indemnity, or liability obligation is offered by
|
||||||
|
You alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
---------------------------------------------------
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this
|
||||||
|
License with respect to some or all of the Covered Software due to
|
||||||
|
statute, judicial order, or regulation then You must: (a) comply with
|
||||||
|
the terms of this License to the maximum extent possible; and (b)
|
||||||
|
describe the limitations and the code they affect. Such description must
|
||||||
|
be placed in a text file included with all distributions of the Covered
|
||||||
|
Software under this License. Except to the extent prohibited by statute
|
||||||
|
or regulation, such description must be sufficiently detailed for a
|
||||||
|
recipient of ordinary skill to be able to understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
--------------
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically
|
||||||
|
if You fail to comply with any of its terms. However, if You become
|
||||||
|
compliant, then the rights granted under this License from a particular
|
||||||
|
Contributor are reinstated (a) provisionally, unless and until such
|
||||||
|
Contributor explicitly and finally terminates Your grants, and (b) on an
|
||||||
|
ongoing basis, if such Contributor fails to notify You of the
|
||||||
|
non-compliance by some reasonable means prior to 60 days after You have
|
||||||
|
come back into compliance. Moreover, Your grants from a particular
|
||||||
|
Contributor are reinstated on an ongoing basis if such Contributor
|
||||||
|
notifies You of the non-compliance by some reasonable means, this is the
|
||||||
|
first time You have received notice of non-compliance with this License
|
||||||
|
from such Contributor, and You become compliant prior to 30 days after
|
||||||
|
Your receipt of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions,
|
||||||
|
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||||
|
directly or indirectly infringes any patent, then the rights granted to
|
||||||
|
You by any and all Contributors for the Covered Software under Section
|
||||||
|
2.1 of this License shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
||||||
|
end user license agreements (excluding distributors and resellers) which
|
||||||
|
have been validly granted by You or Your distributors under this License
|
||||||
|
prior to termination shall survive termination.
|
||||||
|
|
||||||
|
************************************************************************
|
||||||
|
* *
|
||||||
|
* 6. Disclaimer of Warranty *
|
||||||
|
* ------------------------- *
|
||||||
|
* *
|
||||||
|
* Covered Software is provided under this License on an "as is" *
|
||||||
|
* basis, without warranty of any kind, either expressed, implied, or *
|
||||||
|
* statutory, including, without limitation, warranties that the *
|
||||||
|
* Covered Software is free of defects, merchantable, fit for a *
|
||||||
|
* particular purpose or non-infringing. The entire risk as to the *
|
||||||
|
* quality and performance of the Covered Software is with You. *
|
||||||
|
* Should any Covered Software prove defective in any respect, You *
|
||||||
|
* (not any Contributor) assume the cost of any necessary servicing, *
|
||||||
|
* repair, or correction. This disclaimer of warranty constitutes an *
|
||||||
|
* essential part of this License. No use of any Covered Software is *
|
||||||
|
* authorized under this License except under this disclaimer. *
|
||||||
|
* *
|
||||||
|
************************************************************************
|
||||||
|
|
||||||
|
************************************************************************
|
||||||
|
* *
|
||||||
|
* 7. Limitation of Liability *
|
||||||
|
* -------------------------- *
|
||||||
|
* *
|
||||||
|
* Under no circumstances and under no legal theory, whether tort *
|
||||||
|
* (including negligence), contract, or otherwise, shall any *
|
||||||
|
* Contributor, or anyone who distributes Covered Software as *
|
||||||
|
* permitted above, be liable to You for any direct, indirect, *
|
||||||
|
* special, incidental, or consequential damages of any character *
|
||||||
|
* including, without limitation, damages for lost profits, loss of *
|
||||||
|
* goodwill, work stoppage, computer failure or malfunction, or any *
|
||||||
|
* and all other commercial damages or losses, even if such party *
|
||||||
|
* shall have been informed of the possibility of such damages. This *
|
||||||
|
* limitation of liability shall not apply to liability for death or *
|
||||||
|
* personal injury resulting from such party's negligence to the *
|
||||||
|
* extent applicable law prohibits such limitation. Some *
|
||||||
|
* jurisdictions do not allow the exclusion or limitation of *
|
||||||
|
* incidental or consequential damages, so this exclusion and *
|
||||||
|
* limitation may not apply to You. *
|
||||||
|
* *
|
||||||
|
************************************************************************
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the
|
||||||
|
courts of a jurisdiction where the defendant maintains its principal
|
||||||
|
place of business and such litigation shall be governed by laws of that
|
||||||
|
jurisdiction, without reference to its conflict-of-law provisions.
|
||||||
|
Nothing in this Section shall prevent a party's ability to bring
|
||||||
|
cross-claims or counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
----------------
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides
|
||||||
|
that the language of a contract shall be construed against the drafter
|
||||||
|
shall not be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
---------------------------
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses
|
||||||
|
|
||||||
|
If You choose to distribute Source Code Form that is Incompatible With
|
||||||
|
Secondary Licenses under the terms of this version of the License, the
|
||||||
|
notice described in Exhibit B of this License must be attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
-------------------------------------------
|
||||||
|
|
||||||
|
This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular
|
||||||
|
file, then You may include the notice in a location (such as a LICENSE
|
||||||
|
file in a relevant directory) where a recipient would be likely to look
|
||||||
|
for such a notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
---------------------------------------------------------
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||||
|
defined by the Mozilla Public License, v. 2.0.
|
5	vendor/github.com/hashicorp/go-sockaddr/doc.go (generated, vendored, new file)
@@ -0,0 +1,5 @@
/*
Package sockaddr is a Go implementation of the UNIX socket family data types and
related helper functions.
*/
package sockaddr
254	vendor/github.com/hashicorp/go-sockaddr/ifaddr.go (generated, vendored, new file)
@@ -0,0 +1,254 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import "strings"
|
||||||
|
|
||||||
|
// ifAddrAttrMap is a map of the IfAddr type-specific attributes.
|
||||||
|
var ifAddrAttrMap map[AttrName]func(IfAddr) string
|
||||||
|
var ifAddrAttrs []AttrName
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ifAddrAttrInit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPrivateIP returns a string with a single IP address that is part of RFC
|
||||||
|
// 6890 and has a default route. If the system can't determine its IP address
|
||||||
|
// or find an RFC 6890 IP address, an empty string will be returned instead.
|
||||||
|
// This function is the `eval` equivalent of:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}'
|
||||||
|
// ```
|
||||||
|
func GetPrivateIP() (string, error) {
|
||||||
|
privateIfs, err := GetPrivateInterfaces()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if len(privateIfs) < 1 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddr := privateIfs[0]
|
||||||
|
ip := *ToIPAddr(ifAddr.SockAddr)
|
||||||
|
return ip.NetIP().String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPrivateIPs returns a string with all IP addresses that are part of RFC
|
||||||
|
// 6890 (regardless of whether or not there is a default route, unlike
|
||||||
|
// GetPublicIP). If the system can't find any RFC 6890 IP addresses, an empty
|
||||||
|
// string will be returned instead. This function is the `eval` equivalent of:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}'
|
||||||
|
// ```
|
||||||
|
func GetPrivateIPs() (string, error) {
|
||||||
|
ifAddrs, err := GetAllInterfaces()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if len(ifAddrs) < 1 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
|
||||||
|
if len(ifAddrs) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
|
||||||
|
|
||||||
|
ifAddrs, _, err = IfByRFC("6890", ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if len(ifAddrs) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if len(ifAddrs) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ips := make([]string, 0, len(ifAddrs))
|
||||||
|
for _, ifAddr := range ifAddrs {
|
||||||
|
ip := *ToIPAddr(ifAddr.SockAddr)
|
||||||
|
s := ip.NetIP().String()
|
||||||
|
ips = append(ips, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(ips, " "), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPublicIP returns a string with a single IP address that is NOT part of RFC
|
||||||
|
// 6890 and has a default route. If the system can't determine its IP address
|
||||||
|
// or find a non RFC 6890 IP address, an empty string will be returned instead.
|
||||||
|
// This function is the `eval` equivalent of:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}'
|
||||||
|
// ```
|
||||||
|
func GetPublicIP() (string, error) {
|
||||||
|
publicIfs, err := GetPublicInterfaces()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if len(publicIfs) < 1 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddr := publicIfs[0]
|
||||||
|
ip := *ToIPAddr(ifAddr.SockAddr)
|
||||||
|
return ip.NetIP().String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC
|
||||||
|
// 6890 (regardless of whether or not there is a default route, unlike
|
||||||
|
// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an
|
||||||
|
// empty string will be returned instead. This function is the `eval`
|
||||||
|
// equivalent of:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}'
|
||||||
|
// ```
|
||||||
|
func GetPublicIPs() (string, error) {
|
||||||
|
ifAddrs, err := GetAllInterfaces()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if len(ifAddrs) < 1 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
|
||||||
|
if len(ifAddrs) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
|
||||||
|
|
||||||
|
_, ifAddrs, err = IfByRFC("6890", ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if len(ifAddrs) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ips := make([]string, 0, len(ifAddrs))
|
||||||
|
for _, ifAddr := range ifAddrs {
|
||||||
|
ip := *ToIPAddr(ifAddr.SockAddr)
|
||||||
|
s := ip.NetIP().String()
|
||||||
|
ips = append(ips, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(ips, " "), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetInterfaceIP returns a string with a single IP address sorted by the size
|
||||||
|
// of the network (i.e. IP addresses with a smaller netmask, larger network
|
||||||
|
// size, are sorted first). This function is the `eval` equivalent of:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <<ARG>> | sort "type,size" | include "flag" "forwardable" | attr "address" }}'
|
||||||
|
// ```
|
||||||
|
func GetInterfaceIP(namedIfRE string) (string, error) {
|
||||||
|
ifAddrs, err := GetAllInterfaces()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, _, err = IfByFlag("forwardable", ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ifAddrs) == 0 {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ip := ToIPAddr(ifAddrs[0].SockAddr)
|
||||||
|
if ip == nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return IPAddrAttr(*ip, "address"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetInterfaceIPs returns a string with all IPs, sorted by the size of the
|
||||||
|
// network (i.e. IP addresses with a smaller netmask, larger network size, are
|
||||||
|
// sorted first), on a named interface. This function is the `eval` equivalent
|
||||||
|
// of:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <<ARG>> | sort "type,size" | join "address" " "}}'
|
||||||
|
// ```
|
||||||
|
func GetInterfaceIPs(namedIfRE string) (string, error) {
|
||||||
|
ifAddrs, err := GetAllInterfaces()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ifAddrs) == 0 {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ips := make([]string, 0, len(ifAddrs))
|
||||||
|
for _, ifAddr := range ifAddrs {
|
||||||
|
ip := *ToIPAddr(ifAddr.SockAddr)
|
||||||
|
s := ip.NetIP().String()
|
||||||
|
ips = append(ips, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(ips, " "), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfAddrAttrs returns a list of attributes supported by the IfAddr type
|
||||||
|
func IfAddrAttrs() []AttrName {
|
||||||
|
return ifAddrAttrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfAddrAttr returns a string representation of an attribute for the given
|
||||||
|
// IfAddr.
|
||||||
|
func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string {
|
||||||
|
fn, found := ifAddrAttrMap[attrName]
|
||||||
|
if !found {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(ifAddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ifAddrAttrInit is called once at init()
|
||||||
|
func ifAddrAttrInit() {
|
||||||
|
// Sorted for human readability
|
||||||
|
ifAddrAttrs = []AttrName{
|
||||||
|
"flags",
|
||||||
|
"name",
|
||||||
|
}
|
||||||
|
|
||||||
|
ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{
|
||||||
|
"flags": func(ifAddr IfAddr) string {
|
||||||
|
return ifAddr.Interface.Flags.String()
|
||||||
|
},
|
||||||
|
"name": func(ifAddr IfAddr) string {
|
||||||
|
return ifAddr.Interface.Name
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
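GetPrivateIP, GetPrivateIPs, GetPublicIP(s), and GetInterfaceIP(s) above all follow the same pattern: enumerate interfaces, filter (by RFC 6890 membership, interface name, or the forwardable flag), sort, and render the result. A small sketch of calling them from application code, assuming the package is imported as sockaddr from github.com/hashicorp/go-sockaddr (the interface name regexp is illustrative):

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// First RFC 6890 (private) address with a default route, or "" if none.
	private, err := sockaddr.GetPrivateIP()
	if err != nil {
		log.Fatalf("GetPrivateIP: %v", err)
	}
	fmt.Println("private:", private)

	// All addresses on interfaces whose name matches the regexp,
	// sorted most-specific network first.
	ips, err := sockaddr.GetInterfaceIPs("^eth0$")
	if err != nil {
		log.Fatalf("GetInterfaceIPs: %v", err)
	}
	fmt.Println("eth0:", ips)
}
```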
1281	vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go (generated, vendored, new file)
(file diff suppressed because it is too large)
65	vendor/github.com/hashicorp/go-sockaddr/ifattr.go (generated, vendored, new file)
@@ -0,0 +1,65 @@
package sockaddr

import (
	"fmt"
	"net"
)

// IfAddr is a union of a SockAddr and a net.Interface.
type IfAddr struct {
	SockAddr
	net.Interface
}

// Attr returns the named attribute as a string
func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) {
	val := IfAddrAttr(ifAddr, attrName)
	if val != "" {
		return val, nil
	}

	return Attr(ifAddr.SockAddr, attrName)
}

// Attr returns the named attribute as a string
func Attr(sa SockAddr, attrName AttrName) (string, error) {
	switch sockType := sa.Type(); {
	case sockType&TypeIP != 0:
		ip := *ToIPAddr(sa)
		attrVal := IPAddrAttr(ip, attrName)
		if attrVal != "" {
			return attrVal, nil
		}

		if sockType == TypeIPv4 {
			ipv4 := *ToIPv4Addr(sa)
			attrVal := IPv4AddrAttr(ipv4, attrName)
			if attrVal != "" {
				return attrVal, nil
			}
		} else if sockType == TypeIPv6 {
			ipv6 := *ToIPv6Addr(sa)
			attrVal := IPv6AddrAttr(ipv6, attrName)
			if attrVal != "" {
				return attrVal, nil
			}
		}

	case sockType == TypeUnix:
		us := *ToUnixSock(sa)
		attrVal := UnixSockAttr(us, attrName)
		if attrVal != "" {
			return attrVal, nil
		}
	}

	// Non type-specific attributes
	switch attrName {
	case "string":
		return sa.String(), nil
	case "type":
		return sa.Type().String(), nil
	}

	return "", fmt.Errorf("unsupported attribute name %q", attrName)
}
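IfAddr.Attr first consults the interface-level attributes ("name", "flags") and only then falls through to the generic Attr, which dispatches on the concrete SockAddr type and finally on the type-independent "string" and "type" selectors. A short hedged sketch, assuming GetPrivateInterfaces (used by GetPrivateIP above) returns a slice of IfAddr values:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ifAddrs, err := sockaddr.GetPrivateInterfaces()
	if err != nil || len(ifAddrs) == 0 {
		log.Fatalf("no private interfaces found: %v", err)
	}

	name, _ := ifAddrs[0].Attr("name")    // interface-level attribute
	addr, _ := ifAddrs[0].Attr("address") // falls through to the SockAddr attribute
	fmt.Printf("%s -> %s\n", name, addr)
}
```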
169	vendor/github.com/hashicorp/go-sockaddr/ipaddr.go (generated, vendored, new file)
@@ -0,0 +1,169 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Constants for the sizes of IPv3, IPv4, and IPv6 address types.
|
||||||
|
const (
|
||||||
|
IPv3len = 6
|
||||||
|
IPv4len = 4
|
||||||
|
IPv6len = 16
|
||||||
|
)
|
||||||
|
|
||||||
|
// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses,
|
||||||
|
// networks, and socket endpoints.
|
||||||
|
type IPAddr interface {
|
||||||
|
SockAddr
|
||||||
|
AddressBinString() string
|
||||||
|
AddressHexString() string
|
||||||
|
Cmp(SockAddr) int
|
||||||
|
CmpAddress(SockAddr) int
|
||||||
|
CmpPort(SockAddr) int
|
||||||
|
FirstUsable() IPAddr
|
||||||
|
Host() IPAddr
|
||||||
|
IPPort() IPPort
|
||||||
|
LastUsable() IPAddr
|
||||||
|
Maskbits() int
|
||||||
|
NetIP() *net.IP
|
||||||
|
NetIPMask() *net.IPMask
|
||||||
|
NetIPNet() *net.IPNet
|
||||||
|
Network() IPAddr
|
||||||
|
Octets() []int
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPPort is the type for an IP port number for the TCP and UDP IP transports.
|
||||||
|
type IPPort uint16
|
||||||
|
|
||||||
|
// IPPrefixLen is a typed integer representing the prefix length for a given
|
||||||
|
// IPAddr.
|
||||||
|
type IPPrefixLen byte
|
||||||
|
|
||||||
|
// ipAddrAttrMap is a map of the IPAddr type-specific attributes.
|
||||||
|
var ipAddrAttrMap map[AttrName]func(IPAddr) string
|
||||||
|
var ipAddrAttrs []AttrName
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ipAddrInit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is
|
||||||
|
// not an IPv4 or an IPv6 address.
|
||||||
|
func NewIPAddr(addr string) (IPAddr, error) {
|
||||||
|
ipv4Addr, err := NewIPv4Addr(addr)
|
||||||
|
if err == nil {
|
||||||
|
return ipv4Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6Addr, err := NewIPv6Addr(addr)
|
||||||
|
if err == nil {
|
||||||
|
return ipv6Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("invalid IPAddr %v", addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddrAttr returns a string representation of an attribute for the given
|
||||||
|
// IPAddr.
|
||||||
|
func IPAddrAttr(ip IPAddr, selector AttrName) string {
|
||||||
|
fn, found := ipAddrAttrMap[selector]
|
||||||
|
if !found {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAttrs returns a list of attributes supported by the IPAddr type
|
||||||
|
func IPAttrs() []AttrName {
|
||||||
|
return ipAddrAttrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustIPAddr is a helper method that must return an IPAddr or panic on invalid
|
||||||
|
// input.
|
||||||
|
func MustIPAddr(addr string) IPAddr {
|
||||||
|
ip, err := NewIPAddr(addr)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err))
|
||||||
|
}
|
||||||
|
return ip
|
||||||
|
}
|
||||||
|
|
||||||
|
// ipAddrInit is called once at init()
|
||||||
|
func ipAddrInit() {
|
||||||
|
// Sorted for human readability
|
||||||
|
ipAddrAttrs = []AttrName{
|
||||||
|
"host",
|
||||||
|
"address",
|
||||||
|
"port",
|
||||||
|
"netmask",
|
||||||
|
"network",
|
||||||
|
"mask_bits",
|
||||||
|
"binary",
|
||||||
|
"hex",
|
||||||
|
"first_usable",
|
||||||
|
"last_usable",
|
||||||
|
"octets",
|
||||||
|
}
|
||||||
|
|
||||||
|
ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{
|
||||||
|
"address": func(ip IPAddr) string {
|
||||||
|
return ip.NetIP().String()
|
||||||
|
},
|
||||||
|
"binary": func(ip IPAddr) string {
|
||||||
|
return ip.AddressBinString()
|
||||||
|
},
|
||||||
|
"first_usable": func(ip IPAddr) string {
|
||||||
|
return ip.FirstUsable().String()
|
||||||
|
},
|
||||||
|
"hex": func(ip IPAddr) string {
|
||||||
|
return ip.AddressHexString()
|
||||||
|
},
|
||||||
|
"host": func(ip IPAddr) string {
|
||||||
|
return ip.Host().String()
|
||||||
|
},
|
||||||
|
"last_usable": func(ip IPAddr) string {
|
||||||
|
return ip.LastUsable().String()
|
||||||
|
},
|
||||||
|
"mask_bits": func(ip IPAddr) string {
|
||||||
|
return fmt.Sprintf("%d", ip.Maskbits())
|
||||||
|
},
|
||||||
|
"netmask": func(ip IPAddr) string {
|
||||||
|
switch v := ip.(type) {
|
||||||
|
case IPv4Addr:
|
||||||
|
ipv4Mask := IPv4Addr{
|
||||||
|
Address: IPv4Address(v.Mask),
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
}
|
||||||
|
return ipv4Mask.String()
|
||||||
|
case IPv6Addr:
|
||||||
|
ipv6Mask := new(big.Int)
|
||||||
|
ipv6Mask.Set(v.Mask)
|
||||||
|
ipv6MaskAddr := IPv6Addr{
|
||||||
|
Address: IPv6Address(ipv6Mask),
|
||||||
|
Mask: ipv6HostMask,
|
||||||
|
}
|
||||||
|
return ipv6MaskAddr.String()
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("<unsupported type: %T>", ip)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"network": func(ip IPAddr) string {
|
||||||
|
return ip.Network().NetIP().String()
|
||||||
|
},
|
||||||
|
"octets": func(ip IPAddr) string {
|
||||||
|
octets := ip.Octets()
|
||||||
|
octetStrs := make([]string, 0, len(octets))
|
||||||
|
for _, octet := range octets {
|
||||||
|
octetStrs = append(octetStrs, fmt.Sprintf("%d", octet))
|
||||||
|
}
|
||||||
|
return strings.Join(octetStrs, " ")
|
||||||
|
},
|
||||||
|
"port": func(ip IPAddr) string {
|
||||||
|
return fmt.Sprintf("%d", ip.IPPort())
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
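NewIPAddr tries the IPv4 parser first and falls back to IPv6, and IPAddrAttr resolves a selector against the attribute map registered in ipAddrInit. A brief sketch (the address is illustrative), assuming the same sockaddr import as above:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ip, err := sockaddr.NewIPAddr("192.0.2.10/24")
	if err != nil {
		log.Fatalf("NewIPAddr: %v", err)
	}

	// Selectors correspond to the keys registered in ipAddrInit.
	for _, attr := range []sockaddr.AttrName{"address", "netmask", "network", "first_usable", "last_usable"} {
		fmt.Printf("%-12s %s\n", attr, sockaddr.IPAddrAttr(ip, attr))
	}
}
```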
98	vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go (generated, vendored, new file)
@@ -0,0 +1,98 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import "bytes"
|
||||||
|
|
||||||
|
type IPAddrs []IPAddr
|
||||||
|
|
||||||
|
func (s IPAddrs) Len() int { return len(s) }
|
||||||
|
func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used
|
||||||
|
// // by the routines in this package. The SortIPAddrsByCmp type is used to
|
||||||
|
// // sort IPAddrs by Cmp()
|
||||||
|
// type SortIPAddrsByCmp struct{ IPAddrs }
|
||||||
|
|
||||||
|
// // Less reports whether the element with index i should sort before the
|
||||||
|
// // element with index j.
|
||||||
|
// func (s SortIPAddrsByCmp) Less(i, j int) bool {
|
||||||
|
// // Sort by Type, then address, then port number.
|
||||||
|
// return Less(s.IPAddrs[i], s.IPAddrs[j])
|
||||||
|
// }
|
||||||
|
|
||||||
|
// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and
|
||||||
|
// can be used by the routines in this package. The
|
||||||
|
// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest
|
||||||
|
// network (most specific to largest network).
|
||||||
|
type SortIPAddrsByNetworkSize struct{ IPAddrs }
|
||||||
|
|
||||||
|
// Less reports whether the element with index i should sort before the
|
||||||
|
// element with index j.
|
||||||
|
func (s SortIPAddrsByNetworkSize) Less(i, j int) bool {
|
||||||
|
// Sort masks with a larger binary value (i.e. fewer hosts per network
|
||||||
|
// prefix) after masks with a smaller value (larger number of hosts per
|
||||||
|
// prefix).
|
||||||
|
switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) {
|
||||||
|
case 0:
|
||||||
|
// Fall through to the second test if the net.IPMasks are the
|
||||||
|
// same.
|
||||||
|
break
|
||||||
|
case 1:
|
||||||
|
return true
|
||||||
|
case -1:
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
panic("bad, m'kay?")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort IPs based on the length (i.e. prefer IPv4 over IPv6).
|
||||||
|
iLen := len(*s.IPAddrs[i].NetIP())
|
||||||
|
jLen := len(*s.IPAddrs[j].NetIP())
|
||||||
|
if iLen != jLen {
|
||||||
|
return iLen > jLen
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort IPs based on their network address from lowest to highest.
|
||||||
|
switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) {
|
||||||
|
case 0:
|
||||||
|
break
|
||||||
|
case 1:
|
||||||
|
return false
|
||||||
|
case -1:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
panic("lol wut?")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a host does not have a port set, it always sorts after hosts
|
||||||
|
// that have a port (e.g. a host with a /32 and port number is more
|
||||||
|
// specific and should sort first over a host with a /32 but no port
|
||||||
|
// set).
|
||||||
|
if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and
|
||||||
|
// can be used by the routines in this package. The
|
||||||
|
// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest
|
||||||
|
// network (most specific to largest network).
|
||||||
|
type SortIPAddrsBySpecificMaskLen struct{ IPAddrs }
|
||||||
|
|
||||||
|
// Less reports whether the element with index i should sort before the
|
||||||
|
// element with index j.
|
||||||
|
func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool {
|
||||||
|
return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can
|
||||||
|
// be used by the routines in this package. The SortIPAddrsByBroadMaskLen
|
||||||
|
// type is used to sort IPAddrs by largest network (i.e. largest subnets
|
||||||
|
// first).
|
||||||
|
type SortIPAddrsByBroadMaskLen struct{ IPAddrs }
|
||||||
|
|
||||||
|
// Less reports whether the element with index i should sort before the
|
||||||
|
// element with index j.
|
||||||
|
func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool {
|
||||||
|
return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits()
|
||||||
|
}
|
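Each of these wrapper types embeds IPAddrs, which supplies Len and Swap, and contributes only its own Less, so any of them can be handed straight to sort.Sort. A minimal hedged sketch using MustIPAddr from ipaddr.go above (addresses are illustrative):

```go
package main

import (
	"fmt"
	"sort"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	addrs := sockaddr.IPAddrs{
		sockaddr.MustIPAddr("10.0.0.0/8"),
		sockaddr.MustIPAddr("192.0.2.1/32"),
		sockaddr.MustIPAddr("172.16.0.0/12"),
	}

	// Most specific network (largest mask) first: /32, then /12, then /8.
	sort.Sort(sockaddr.SortIPAddrsBySpecificMaskLen{IPAddrs: addrs})
	for _, a := range addrs {
		fmt.Println(a)
	}
}
```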
516	vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go (generated, vendored, new file)
@@ -0,0 +1,516 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// IPv4Address is a named type representing an IPv4 address.
|
||||||
|
IPv4Address uint32
|
||||||
|
|
||||||
|
// IPv4Network is a named type representing an IPv4 network.
|
||||||
|
IPv4Network uint32
|
||||||
|
|
||||||
|
// IPv4Mask is a named type representing an IPv4 network mask.
|
||||||
|
IPv4Mask uint32
|
||||||
|
)
|
||||||
|
|
||||||
|
// IPv4HostMask is a constant represents a /32 IPv4 Address
|
||||||
|
// (i.e. 255.255.255.255).
|
||||||
|
const IPv4HostMask = IPv4Mask(0xffffffff)
|
||||||
|
|
||||||
|
// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes.
|
||||||
|
var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string
|
||||||
|
var ipv4AddrAttrs []AttrName
|
||||||
|
var trailingHexNetmaskRE *regexp.Regexp
|
||||||
|
|
||||||
|
// IPv4Addr implements a convenience wrapper around the union of Go's
|
||||||
|
// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements
|
||||||
|
// `sockaddr` when the the address family is set to AF_INET
|
||||||
|
// (i.e. `sockaddr_in`).
|
||||||
|
type IPv4Addr struct {
|
||||||
|
IPAddr
|
||||||
|
Address IPv4Address
|
||||||
|
Mask IPv4Mask
|
||||||
|
Port IPPort
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ipv4AddrInit()
|
||||||
|
trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form
|
||||||
|
// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is
|
||||||
|
// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32`
|
||||||
|
// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port
|
||||||
|
// initialized to zero). ipv4Str can not be a hostname.
|
||||||
|
//
|
||||||
|
// NOTE: Many net.*() routines will initialize and return an IPv6 address.
|
||||||
|
// To create uint32 values from net.IP, always test to make sure the address
|
||||||
|
// returned can be converted to a 4 byte array using To4().
|
||||||
|
func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) {
|
||||||
|
// Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In
|
||||||
|
// particular, clients with the Barracuda VPN client will see something like:
|
||||||
|
// `192.168.3.51/00ffffff` as their IP address.
|
||||||
|
trailingHexNetmaskRe := trailingHexNetmaskRE.Copy()
|
||||||
|
if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil {
|
||||||
|
ipv4Str = ipv4Str[:match[0]]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse as an IPv4 CIDR
|
||||||
|
ipAddr, network, err := net.ParseCIDR(ipv4Str)
|
||||||
|
if err == nil {
|
||||||
|
ipv4 := ipAddr.To4()
|
||||||
|
if ipv4 == nil {
|
||||||
|
return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we see an IPv6 netmask, convert it to an IPv4 mask.
|
||||||
|
netmaskSepPos := strings.LastIndexByte(ipv4Str, '/')
|
||||||
|
if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) {
|
||||||
|
netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8)
|
||||||
|
if err != nil {
|
||||||
|
return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err)
|
||||||
|
} else if netMask > 128 {
|
||||||
|
return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
if netMask >= 96 {
|
||||||
|
// Convert the IPv6 netmask to an IPv4 netmask
|
||||||
|
network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ipv4Addr := IPv4Addr{
|
||||||
|
Address: IPv4Address(binary.BigEndian.Uint32(ipv4)),
|
||||||
|
Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)),
|
||||||
|
}
|
||||||
|
return ipv4Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to parse ipv4Str as a /32 host with a port number.
|
||||||
|
tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str)
|
||||||
|
if err == nil {
|
||||||
|
ipv4 := tcpAddr.IP.To4()
|
||||||
|
if ipv4 == nil {
|
||||||
|
return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
|
||||||
|
ipv4Addr := IPv4Addr{
|
||||||
|
Address: IPv4Address(ipv4Uint32),
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
Port: IPPort(tcpAddr.Port),
|
||||||
|
}
|
||||||
|
|
||||||
|
return ipv4Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse as a naked IPv4 address
|
||||||
|
ip := net.ParseIP(ipv4Str)
|
||||||
|
if ip != nil {
|
||||||
|
ipv4 := ip.To4()
|
||||||
|
if ipv4 == nil {
|
||||||
|
return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
|
||||||
|
ipv4Addr := IPv4Addr{
|
||||||
|
Address: IPv4Address(ipv4Uint32),
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
}
|
||||||
|
return ipv4Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err)
|
||||||
|
}
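As the comment on NewIPv4Addr explains, the parser accepts an address with a port, a bare address, or a CIDR, and fills in the mask and port accordingly. A short sketch of the three forms (addresses are illustrative), assuming the same sockaddr import as above:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// Host with port: the mask is /32 and Port is populated.
	hostPort, _ := sockaddr.NewIPv4Addr("192.0.2.4:80")

	// Bare address: the mask is /32 and Port is zero.
	host, _ := sockaddr.NewIPv4Addr("192.0.2.4")

	// CIDR: the mask comes from the prefix and Port is zero.
	network, _ := sockaddr.NewIPv4Addr("192.0.2.0/24")

	fmt.Println(hostPort.IPPort(), host.Maskbits(), network.Maskbits())
}
```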
|
||||||
|
|
||||||
|
// AddressBinString returns a string with the IPv4Addr's Address represented
|
||||||
|
// as a sequence of '0' and '1' characters. This method is useful for
|
||||||
|
// debugging or by operators who want to inspect an address.
|
||||||
|
func (ipv4 IPv4Addr) AddressBinString() string {
|
||||||
|
return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddressHexString returns a string with the IPv4Addr address represented as
|
||||||
|
// a sequence of hex characters. This method is useful for debugging or by
|
||||||
|
// operators who want to inspect an address.
|
||||||
|
func (ipv4 IPv4Addr) AddressHexString() string {
|
||||||
|
return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Broadcast is an IPv4Addr-only method that returns the broadcast address of
|
||||||
|
// the network.
|
||||||
|
//
|
||||||
|
// NOTE: IPv6 only supports multicast, so this method only exists for
|
||||||
|
// IPv4Addr.
|
||||||
|
func (ipv4 IPv4Addr) Broadcast() IPAddr {
|
||||||
|
// Nothing should listen on a broadcast address.
|
||||||
|
return IPv4Addr{
|
||||||
|
Address: IPv4Address(ipv4.BroadcastAddress()),
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast
|
||||||
|
// address.
|
||||||
|
func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network {
|
||||||
|
return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpAddress follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because its address is lower than arg
|
||||||
|
// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is
|
||||||
|
// of a different type.
|
||||||
|
// - 1 If the argument should sort first.
|
||||||
|
func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int {
|
||||||
|
ipv4b, ok := sa.(IPv4Addr)
|
||||||
|
if !ok {
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case ipv4.Address == ipv4b.Address:
|
||||||
|
return sortDeferDecision
|
||||||
|
case ipv4.Address < ipv4b.Address:
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
default:
|
||||||
|
return sortArgBeforeReceiver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpPort follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because its port is lower than arg
|
||||||
|
// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr,
|
||||||
|
// regardless of type.
|
||||||
|
// - 1 If the argument should sort first.
|
||||||
|
func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int {
|
||||||
|
var saPort IPPort
|
||||||
|
switch v := sa.(type) {
|
||||||
|
case IPv4Addr:
|
||||||
|
saPort = v.Port
|
||||||
|
case IPv6Addr:
|
||||||
|
saPort = v.Port
|
||||||
|
default:
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case ipv4.Port == saPort:
|
||||||
|
return sortDeferDecision
|
||||||
|
case ipv4.Port < saPort:
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
default:
|
||||||
|
return sortArgBeforeReceiver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpRFC follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because it belongs to the RFC and its
|
||||||
|
// arg does not
|
||||||
|
// - 0 if the receiver and arg both belong to the same RFC or neither do.
|
||||||
|
// - 1 If the arg belongs to the RFC but receiver does not.
|
||||||
|
func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
|
||||||
|
recvInRFC := IsRFC(rfcNum, ipv4)
|
||||||
|
ipv4b, ok := sa.(IPv4Addr)
|
||||||
|
if !ok {
|
||||||
|
// If the receiver is part of the desired RFC and the SockAddr
|
||||||
|
// argument is not, return -1 so that the receiver sorts before
|
||||||
|
// the non-IPv4 SockAddr. Conversely, if the receiver is not
|
||||||
|
// part of the RFC, punt on sorting and leave it for the next
|
||||||
|
// sorter.
|
||||||
|
if recvInRFC {
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
} else {
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
argInRFC := IsRFC(rfcNum, ipv4b)
|
||||||
|
switch {
|
||||||
|
case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
|
||||||
|
// If a and b both belong to the RFC, or neither belong to
|
||||||
|
// rfcNum, defer sorting to the next sorter.
|
||||||
|
return sortDeferDecision
|
||||||
|
case recvInRFC && !argInRFC:
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
default:
|
||||||
|
return sortArgBeforeReceiver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains returns true if the SockAddr is contained within the receiver.
|
||||||
|
func (ipv4 IPv4Addr) Contains(sa SockAddr) bool {
|
||||||
|
ipv4b, ok := sa.(IPv4Addr)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return ipv4.ContainsNetwork(ipv4b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsAddress returns true if the IPv4Address is contained within the
|
||||||
|
// receiver.
|
||||||
|
func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool {
|
||||||
|
return IPv4Address(ipv4.NetworkAddress()) <= x &&
|
||||||
|
IPv4Address(ipv4.BroadcastAddress()) >= x
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsNetwork returns true if the network from IPv4Addr is contained
|
||||||
|
// within the receiver.
|
||||||
|
func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool {
|
||||||
|
return ipv4.NetworkAddress() <= x.NetworkAddress() &&
|
||||||
|
ipv4.BroadcastAddress() >= x.BroadcastAddress()
|
||||||
|
}
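Contains type-asserts its argument to IPv4Addr and defers to ContainsNetwork, so a network contains another address exactly when the argument's network and broadcast addresses both fall inside the receiver's range. A quick hedged sketch (addresses are illustrative):

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	network, _ := sockaddr.NewIPv4Addr("192.0.2.0/24")
	host, _ := sockaddr.NewIPv4Addr("192.0.2.17")

	fmt.Println(network.Contains(host)) // true: the /32 lies inside the /24
	fmt.Println(host.Contains(network)) // false: a /32 cannot contain a /24
}
```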
|
||||||
|
|
||||||
|
// DialPacketArgs returns the arguments required to be passed to
|
||||||
|
// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0,
|
||||||
|
// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its
|
||||||
|
// mask set to /32.
|
||||||
|
func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) {
|
||||||
|
if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
|
||||||
|
return "udp4", ""
|
||||||
|
}
|
||||||
|
return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialStreamArgs returns the arguments required to be passed to
|
||||||
|
// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0,
|
||||||
|
// DialStreamArgs() will fail. See Host() to create an IPv4Addr with its
|
||||||
|
// mask set to /32.
|
||||||
|
func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) {
|
||||||
|
if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
|
||||||
|
return "tcp4", ""
|
||||||
|
}
|
||||||
|
return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal returns true if a SockAddr is equal to the receiving IPv4Addr.
|
||||||
|
func (ipv4 IPv4Addr) Equal(sa SockAddr) bool {
|
||||||
|
ipv4b, ok := sa.(IPv4Addr)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv4.Port != ipv4b.Port {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv4.Address != ipv4b.Address {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstUsable returns an IPv4Addr set to the first address following the
|
||||||
|
// network prefix. The first usable address in a network is normally the
|
||||||
|
// gateway and should not be used except by devices forwarding packets
|
||||||
|
// between two administratively distinct networks (i.e. a router). This
|
||||||
|
// function does not discriminate against first usable vs "first address that
|
||||||
|
// should be used." For example, FirstUsable() on "192.168.1.10/24" would
|
||||||
|
// return the address "192.168.1.1/24".
|
||||||
|
func (ipv4 IPv4Addr) FirstUsable() IPAddr {
|
||||||
|
addr := ipv4.NetworkAddress()
|
||||||
|
|
||||||
|
// If /32, return the address itself. If /31 assume a point-to-point
|
||||||
|
// link and return the lower address.
|
||||||
|
if ipv4.Maskbits() < 31 {
|
||||||
|
addr++
|
||||||
|
}
|
||||||
|
|
||||||
|
return IPv4Addr{
|
||||||
|
Address: IPv4Address(addr),
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Host returns a copy of ipv4 with its mask set to /32 so that it can be
|
||||||
|
// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
|
||||||
|
// ListenStreamArgs().
|
||||||
|
func (ipv4 IPv4Addr) Host() IPAddr {
|
||||||
|
// Nothing should listen on a broadcast address.
|
||||||
|
return IPv4Addr{
|
||||||
|
Address: ipv4.Address,
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
Port: ipv4.Port,
|
||||||
|
}
|
||||||
|
}
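// Editor's sketch (not part of the vendored file): Dial*Args() require a /32 mask
// and a non-zero port, so Host() is used first, as described above. Assumes
// NewIPv4Addr accepts the ip:port form (mirroring NewIPv6Addr later in this commit)
// and that the package is imported as `sockaddr` with "fmt".
func exampleIPv4DialArgs() {
	sa := sockaddr.MustIPv4Addr("192.168.1.10:8500")
	host := sa.Host().(sockaddr.IPv4Addr) // Host() returns IPAddr; the dynamic type is IPv4Addr
	network, addr := host.DialStreamArgs()
	fmt.Println(network, addr) // expected: tcp4 192.168.1.10:8500
}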
|
||||||
|
|
||||||
|
// IPPort returns the Port number attached to the IPv4Addr
|
||||||
|
func (ipv4 IPv4Addr) IPPort() IPPort {
|
||||||
|
return ipv4.Port
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsable returns the last address before the broadcast address in a
|
||||||
|
// given network.
|
||||||
|
func (ipv4 IPv4Addr) LastUsable() IPAddr {
|
||||||
|
addr := ipv4.BroadcastAddress()
|
||||||
|
|
||||||
|
// If /32, return the address itself. If /31 assume a point-to-point
|
||||||
|
// link and return the upper address.
|
||||||
|
if ipv4.Maskbits() < 31 {
|
||||||
|
addr--
|
||||||
|
}
|
||||||
|
|
||||||
|
return IPv4Addr{
|
||||||
|
Address: IPv4Address(addr),
|
||||||
|
Mask: IPv4HostMask,
|
||||||
|
}
|
||||||
|
}
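// Editor's sketch (not part of the vendored file): FirstUsable()/LastUsable() as
// documented above. Assumes the package is imported as `sockaddr` with "fmt".
func exampleIPv4UsableRange() {
	network := sockaddr.MustIPv4Addr("192.168.1.10/24")
	fmt.Println(network.FirstUsable()) // 192.168.1.1
	fmt.Println(network.LastUsable())  // 192.168.1.254
}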
|
||||||
|
|
||||||
|
// ListenPacketArgs returns the arguments required to be passed to
|
||||||
|
// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs()
|
||||||
|
// will fail. See Host() to create an IPv4Addr with its mask set to /32.
|
||||||
|
func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) {
|
||||||
|
if ipv4.Mask != IPv4HostMask {
|
||||||
|
return "udp4", ""
|
||||||
|
}
|
||||||
|
return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenStreamArgs returns the arguments required to be passed to
|
||||||
|
// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs()
|
||||||
|
// will fail. See Host() to create an IPv4Addr with its mask set to /32.
|
||||||
|
func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) {
|
||||||
|
if ipv4.Mask != IPv4HostMask {
|
||||||
|
return "tcp4", ""
|
||||||
|
}
|
||||||
|
return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Maskbits returns the number of network mask bits in a given IPv4Addr. For
|
||||||
|
// example, the Maskbits() of "192.168.1.1/24" would return 24.
|
||||||
|
func (ipv4 IPv4Addr) Maskbits() int {
|
||||||
|
mask := make(net.IPMask, IPv4len)
|
||||||
|
binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask))
|
||||||
|
maskOnes, _ := mask.Size()
|
||||||
|
return maskOnes
|
||||||
|
}
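// Editor's sketch (not part of the vendored file): Maskbits() per the comment above.
// Assumes the package is imported as `sockaddr` with "fmt".
func exampleIPv4Maskbits() {
	fmt.Println(sockaddr.MustIPv4Addr("192.168.1.1/24").Maskbits()) // 24
}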
|
||||||
|
|
||||||
|
// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on
|
||||||
|
// invalid input.
|
||||||
|
func MustIPv4Addr(addr string) IPv4Addr {
|
||||||
|
ipv4, err := NewIPv4Addr(addr)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err))
|
||||||
|
}
|
||||||
|
return ipv4
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetIP returns the address as a net.IP (address is always presized to
|
||||||
|
// IPv4).
|
||||||
|
func (ipv4 IPv4Addr) NetIP() *net.IP {
|
||||||
|
x := make(net.IP, IPv4len)
|
||||||
|
binary.BigEndian.PutUint32(x, uint32(ipv4.Address))
|
||||||
|
return &x
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetIPMask creates a new net.IPMask from the IPv4Addr.
|
||||||
|
func (ipv4 IPv4Addr) NetIPMask() *net.IPMask {
|
||||||
|
ipv4Mask := net.IPMask{}
|
||||||
|
ipv4Mask = make(net.IPMask, IPv4len)
|
||||||
|
binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask))
|
||||||
|
return &ipv4Mask
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetIPNet creates a new net.IPNet from the IPv4Addr.
|
||||||
|
func (ipv4 IPv4Addr) NetIPNet() *net.IPNet {
|
||||||
|
ipv4net := &net.IPNet{}
|
||||||
|
ipv4net.IP = make(net.IP, IPv4len)
|
||||||
|
binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress()))
|
||||||
|
ipv4net.Mask = *ipv4.NetIPMask()
|
||||||
|
return ipv4net
|
||||||
|
}
|
||||||
|
|
||||||
|
// Network returns the network prefix or network address for a given network.
|
||||||
|
func (ipv4 IPv4Addr) Network() IPAddr {
|
||||||
|
return IPv4Addr{
|
||||||
|
Address: IPv4Address(ipv4.NetworkAddress()),
|
||||||
|
Mask: ipv4.Mask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetworkAddress returns an IPv4Network of the IPv4Addr's network address.
|
||||||
|
func (ipv4 IPv4Addr) NetworkAddress() IPv4Network {
|
||||||
|
return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Octets returns a slice of the four octets in an IPv4Addr's Address. The
|
||||||
|
// order of the bytes is big endian.
|
||||||
|
func (ipv4 IPv4Addr) Octets() []int {
|
||||||
|
return []int{
|
||||||
|
int(ipv4.Address >> 24),
|
||||||
|
int((ipv4.Address >> 16) & 0xff),
|
||||||
|
int((ipv4.Address >> 8) & 0xff),
|
||||||
|
int(ipv4.Address & 0xff),
|
||||||
|
}
|
||||||
|
}
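// Editor's sketch (not part of the vendored file): Octets() returns the four bytes
// in big-endian order, as documented above. Assumes the package is imported as
// `sockaddr` with "fmt".
func exampleIPv4Octets() {
	fmt.Println(sockaddr.MustIPv4Addr("203.0.113.7/32").Octets()) // [203 0 113 7]
}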
|
||||||
|
|
||||||
|
// String returns a string representation of the IPv4Addr
|
||||||
|
func (ipv4 IPv4Addr) String() string {
|
||||||
|
if ipv4.Port != 0 {
|
||||||
|
return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv4.Maskbits() == 32 {
|
||||||
|
return ipv4.NetIP().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type is used as a type switch and returns TypeIPv4
|
||||||
|
func (IPv4Addr) Type() SockAddrType {
|
||||||
|
return TypeIPv4
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv4AddrAttr returns a string representation of an attribute for the given
|
||||||
|
// IPv4Addr.
|
||||||
|
func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string {
|
||||||
|
fn, found := ipv4AddrAttrMap[selector]
|
||||||
|
if !found {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(ipv4)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv4Attrs returns a list of attributes supported by the IPv4Addr type
|
||||||
|
func IPv4Attrs() []AttrName {
|
||||||
|
return ipv4AddrAttrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// ipv4AddrInit is called once at init()
|
||||||
|
func ipv4AddrInit() {
|
||||||
|
// Sorted for human readability
|
||||||
|
ipv4AddrAttrs = []AttrName{
|
||||||
|
"size", // Same position as in IPv6 for output consistency
|
||||||
|
"broadcast",
|
||||||
|
"uint32",
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{
|
||||||
|
"broadcast": func(ipv4 IPv4Addr) string {
|
||||||
|
return ipv4.Broadcast().String()
|
||||||
|
},
|
||||||
|
"size": func(ipv4 IPv4Addr) string {
|
||||||
|
return fmt.Sprintf("%d", 1<<uint(IPv4len*8-ipv4.Maskbits()))
|
||||||
|
},
|
||||||
|
"uint32": func(ipv4 IPv4Addr) string {
|
||||||
|
return fmt.Sprintf("%d", uint32(ipv4.Address))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
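// Editor's sketch (not part of the vendored file): looking up the attributes
// registered above via IPv4AddrAttr(). Assumes the package is imported as
// `sockaddr` with "fmt".
func exampleIPv4Attrs() {
	sa := sockaddr.MustIPv4Addr("192.168.0.0/16")
	fmt.Println(sockaddr.IPv4AddrAttr(sa, "size"))      // 65536, i.e. 1 << (32-16)
	fmt.Println(sockaddr.IPv4AddrAttr(sa, "uint32"))    // 3232235520
	fmt.Println(sockaddr.IPv4AddrAttr(sa, "broadcast")) // expected: 192.168.255.255
}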
|
591
vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go
generated
vendored
Normal file
|
@ -0,0 +1,591 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// IPv6Address is a named type representing an IPv6 address.
|
||||||
|
IPv6Address *big.Int
|
||||||
|
|
||||||
|
// IPv6Network is a named type representing an IPv6 network.
|
||||||
|
IPv6Network *big.Int
|
||||||
|
|
||||||
|
// IPv6Mask is a named type representing an IPv6 network mask.
|
||||||
|
IPv6Mask *big.Int
|
||||||
|
)
|
||||||
|
|
||||||
|
// IPv6HostPrefix is a constant that represents a /128 IPv6 prefix.
|
||||||
|
const IPv6HostPrefix = IPPrefixLen(128)
|
||||||
|
|
||||||
|
// ipv6HostMask is an unexported big.Int representing a /128 IPv6 address.
|
||||||
|
// This value must be a constant and always set to all ones.
|
||||||
|
var ipv6HostMask IPv6Mask
|
||||||
|
|
||||||
|
// ipv6AddrAttrMap is a map of the IPv6Addr type-specific attributes.
|
||||||
|
var ipv6AddrAttrMap map[AttrName]func(IPv6Addr) string
|
||||||
|
var ipv6AddrAttrs []AttrName
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
biMask := new(big.Int)
|
||||||
|
biMask.SetBytes([]byte{
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
0xff, 0xff,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
ipv6HostMask = IPv6Mask(biMask)
|
||||||
|
|
||||||
|
ipv6AddrInit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv6Addr implements a convenience wrapper around the union of Go's
|
||||||
|
// built-in net.IP and net.IPNet types. In UNIX-speak, IPv6Addr implements
|
||||||
|
// `sockaddr` when the address family is set to AF_INET6
|
||||||
|
// (i.e. `sockaddr_in6`).
|
||||||
|
type IPv6Addr struct {
|
||||||
|
IPAddr
|
||||||
|
Address IPv6Address
|
||||||
|
Mask IPv6Mask
|
||||||
|
Port IPPort
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIPv6Addr creates an IPv6Addr from a string. String can be in the form of
|
||||||
|
// an IPv6:port (e.g. `[2001:4860:0:2001::68]:80`, in which case the mask is
|
||||||
|
// assumed to be a /128), an IPv6 address (e.g. `2001:4860:0:2001::68`, also
|
||||||
|
// with a `/128` mask), an IPv6 CIDR (e.g. `2001:4860:0:2001::68/64`, which has
|
||||||
|
// its IP port initialized to zero). ipv6Str can not be a hostname.
|
||||||
|
//
|
||||||
|
// NOTE: Many net.*() routines will initialize and return an IPv4 address.
|
||||||
|
// Always test to make sure the address returned cannot be converted to a 4 byte
|
||||||
|
// array using To4().
|
||||||
|
func NewIPv6Addr(ipv6Str string) (IPv6Addr, error) {
|
||||||
|
v6Addr := false
|
||||||
|
LOOP:
|
||||||
|
for i := 0; i < len(ipv6Str); i++ {
|
||||||
|
switch ipv6Str[i] {
|
||||||
|
case '.':
|
||||||
|
break LOOP
|
||||||
|
case ':':
|
||||||
|
v6Addr = true
|
||||||
|
break LOOP
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !v6Addr {
|
||||||
|
return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv6 address, appears to be an IPv4 address", ipv6Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to parse ipv6Str as a /128 host with a port number.
|
||||||
|
tcpAddr, err := net.ResolveTCPAddr("tcp6", ipv6Str)
|
||||||
|
if err == nil {
|
||||||
|
ipv6 := tcpAddr.IP.To16()
|
||||||
|
if ipv6 == nil {
|
||||||
|
return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as a 16byte IPv6 address", ipv6Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6BigIntAddr := new(big.Int)
|
||||||
|
ipv6BigIntAddr.SetBytes(ipv6)
|
||||||
|
|
||||||
|
ipv6BigIntMask := new(big.Int)
|
||||||
|
ipv6BigIntMask.Set(ipv6HostMask)
|
||||||
|
|
||||||
|
ipv6Addr := IPv6Addr{
|
||||||
|
Address: IPv6Address(ipv6BigIntAddr),
|
||||||
|
Mask: IPv6Mask(ipv6BigIntMask),
|
||||||
|
Port: IPPort(tcpAddr.Port),
|
||||||
|
}
|
||||||
|
|
||||||
|
return ipv6Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse as a naked IPv6 address. Trim square brackets if present.
|
||||||
|
if len(ipv6Str) > 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' {
|
||||||
|
ipv6Str = ipv6Str[1 : len(ipv6Str)-1]
|
||||||
|
}
|
||||||
|
ip := net.ParseIP(ipv6Str)
|
||||||
|
if ip != nil {
|
||||||
|
ipv6 := ip.To16()
|
||||||
|
if ipv6 == nil {
|
||||||
|
return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6BigIntAddr := new(big.Int)
|
||||||
|
ipv6BigIntAddr.SetBytes(ipv6)
|
||||||
|
|
||||||
|
ipv6BigIntMask := new(big.Int)
|
||||||
|
ipv6BigIntMask.Set(ipv6HostMask)
|
||||||
|
|
||||||
|
return IPv6Addr{
|
||||||
|
Address: IPv6Address(ipv6BigIntAddr),
|
||||||
|
Mask: IPv6Mask(ipv6BigIntMask),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse as an IPv6 CIDR
|
||||||
|
ipAddr, network, err := net.ParseCIDR(ipv6Str)
|
||||||
|
if err == nil {
|
||||||
|
ipv6 := ipAddr.To16()
|
||||||
|
if ipv6 == nil {
|
||||||
|
return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str)
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6BigIntAddr := new(big.Int)
|
||||||
|
ipv6BigIntAddr.SetBytes(ipv6)
|
||||||
|
|
||||||
|
ipv6BigIntMask := new(big.Int)
|
||||||
|
ipv6BigIntMask.SetBytes(network.Mask)
|
||||||
|
|
||||||
|
ipv6Addr := IPv6Addr{
|
||||||
|
Address: IPv6Address(ipv6BigIntAddr),
|
||||||
|
Mask: IPv6Mask(ipv6BigIntMask),
|
||||||
|
}
|
||||||
|
return ipv6Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err)
|
||||||
|
}
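// Editor's sketch (not part of the vendored file): the three input forms accepted
// by NewIPv6Addr(), as documented above. Assumes the package is imported as
// `sockaddr` with "fmt"; error handling is elided for brevity.
func exampleNewIPv6Addr() {
	withPort, _ := sockaddr.NewIPv6Addr("[2001:4860:0:2001::68]:80") // /128 mask, port 80
	bare, _ := sockaddr.NewIPv6Addr("2001:4860:0:2001::68")          // /128 mask, port 0
	cidr, _ := sockaddr.NewIPv6Addr("2001:4860:0:2001::68/64")       // /64 mask, port 0
	fmt.Println(withPort.IPPort(), bare.Maskbits(), cidr.Maskbits()) // 80 128 64
}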
|
||||||
|
|
||||||
|
// AddressBinString returns a string with the IPv6Addr's Address represented
|
||||||
|
// as a sequence of '0' and '1' characters. This method is useful for
|
||||||
|
// debugging or by operators who want to inspect an address.
|
||||||
|
func (ipv6 IPv6Addr) AddressBinString() string {
|
||||||
|
bi := big.Int(*ipv6.Address)
|
||||||
|
return fmt.Sprintf("%0128s", bi.Text(2))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddressHexString returns a string with the IPv6Addr address represented as
|
||||||
|
// a sequence of hex characters. This method is useful for debugging or by
|
||||||
|
// operators who want to inspect an address.
|
||||||
|
func (ipv6 IPv6Addr) AddressHexString() string {
|
||||||
|
bi := big.Int(*ipv6.Address)
|
||||||
|
return fmt.Sprintf("%032s", bi.Text(16))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpAddress follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because its address is lower than arg
|
||||||
|
// - 0 if the SockAddr arg is equal to the receiving IPv6Addr or the argument is of a
|
||||||
|
// different type.
|
||||||
|
// - 1 If the argument should sort first.
|
||||||
|
func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int {
|
||||||
|
ipv6b, ok := sa.(IPv6Addr)
|
||||||
|
if !ok {
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6aBigInt := new(big.Int)
|
||||||
|
ipv6aBigInt.Set(ipv6.Address)
|
||||||
|
ipv6bBigInt := new(big.Int)
|
||||||
|
ipv6bBigInt.Set(ipv6b.Address)
|
||||||
|
|
||||||
|
return ipv6aBigInt.Cmp(ipv6bBigInt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpPort follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because its port is lower than arg
|
||||||
|
// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr,
|
||||||
|
// regardless of type.
|
||||||
|
// - 1 If the argument should sort first.
|
||||||
|
func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int {
|
||||||
|
var saPort IPPort
|
||||||
|
switch v := sa.(type) {
|
||||||
|
case IPv4Addr:
|
||||||
|
saPort = v.Port
|
||||||
|
case IPv6Addr:
|
||||||
|
saPort = v.Port
|
||||||
|
default:
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case ipv6.Port == saPort:
|
||||||
|
return sortDeferDecision
|
||||||
|
case ipv6.Port < saPort:
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
default:
|
||||||
|
return sortArgBeforeReceiver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpRFC follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because it belongs to the RFC and its
|
||||||
|
// arg does not
|
||||||
|
// - 0 if the receiver and arg both belong to the same RFC or neither do.
|
||||||
|
// - 1 If the arg belongs to the RFC but receiver does not.
|
||||||
|
func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
|
||||||
|
recvInRFC := IsRFC(rfcNum, ipv6)
|
||||||
|
ipv6b, ok := sa.(IPv6Addr)
|
||||||
|
if !ok {
|
||||||
|
// If the receiver is part of the desired RFC and the SockAddr
|
||||||
|
// argument is not, sort receiver before the non-IPv6 SockAddr.
|
||||||
|
// Conversely, if the receiver is not part of the RFC, punt on
|
||||||
|
// sorting and leave it for the next sorter.
|
||||||
|
if recvInRFC {
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
} else {
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
argInRFC := IsRFC(rfcNum, ipv6b)
|
||||||
|
switch {
|
||||||
|
case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
|
||||||
|
// If a and b both belong to the RFC, or neither belong to
|
||||||
|
// rfcNum, defer sorting to the next sorter.
|
||||||
|
return sortDeferDecision
|
||||||
|
case recvInRFC && !argInRFC:
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
default:
|
||||||
|
return sortArgBeforeReceiver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains returns true if the SockAddr is contained within the receiver.
|
||||||
|
func (ipv6 IPv6Addr) Contains(sa SockAddr) bool {
|
||||||
|
ipv6b, ok := sa.(IPv6Addr)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return ipv6.ContainsNetwork(ipv6b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsAddress returns true if the IPv6Address is contained within the
|
||||||
|
// receiver.
|
||||||
|
func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool {
|
||||||
|
xAddr := IPv6Addr{
|
||||||
|
Address: x,
|
||||||
|
Mask: ipv6HostMask,
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
xIPv6 := xAddr.FirstUsable().(IPv6Addr)
|
||||||
|
yIPv6 := ipv6.FirstUsable().(IPv6Addr)
|
||||||
|
if xIPv6.CmpAddress(yIPv6) >= 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
xIPv6 := xAddr.LastUsable().(IPv6Addr)
|
||||||
|
yIPv6 := ipv6.LastUsable().(IPv6Addr)
|
||||||
|
if xIPv6.CmpAddress(yIPv6) <= -1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsNetwork returns true if the network from IPv6Addr is contained within
|
||||||
|
// the receiver.
|
||||||
|
func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool {
|
||||||
|
{
|
||||||
|
xIPv6 := x.FirstUsable().(IPv6Addr)
|
||||||
|
yIPv6 := y.FirstUsable().(IPv6Addr)
|
||||||
|
if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
xIPv6 := x.LastUsable().(IPv6Addr)
|
||||||
|
yIPv6 := y.LastUsable().(IPv6Addr)
|
||||||
|
if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
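// Editor's sketch (not part of the vendored file): IPv6 containment, which compares
// the FirstUsable()/LastUsable() bounds as shown above. Assumes the package is
// imported as `sockaddr` with "fmt".
func exampleIPv6Contains() {
	docNet := sockaddr.MustIPv6Addr("2001:db8::/32")
	fmt.Println(docNet.Contains(sockaddr.MustIPv6Addr("2001:db8::1/128"))) // true
	fmt.Println(docNet.Contains(sockaddr.MustIPv6Addr("::1/128")))         // false
}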
|
||||||
|
|
||||||
|
// DialPacketArgs returns the arguments required to be passed to
|
||||||
|
// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0,
|
||||||
|
// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its
|
||||||
|
// mask set to /128.
|
||||||
|
func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) {
|
||||||
|
ipv6Mask := big.Int(*ipv6.Mask)
|
||||||
|
if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
|
||||||
|
return "udp6", ""
|
||||||
|
}
|
||||||
|
return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialStreamArgs returns the arguments required to be passed to
|
||||||
|
// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0,
|
||||||
|
// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its
|
||||||
|
// mask set to /128.
|
||||||
|
func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) {
|
||||||
|
ipv6Mask := big.Int(*ipv6.Mask)
|
||||||
|
if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
|
||||||
|
return "tcp6", ""
|
||||||
|
}
|
||||||
|
return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal returns true if a SockAddr is equal to the receiving IPv6Addr.
|
||||||
|
func (ipv6a IPv6Addr) Equal(sa SockAddr) bool {
|
||||||
|
ipv6b, ok := sa.(IPv6Addr)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv6a.NetIP().String() != ipv6b.NetIP().String() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv6a.Port != ipv6b.Port {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstUsable returns an IPv6Addr set to the first address following the
|
||||||
|
// network prefix. The first usable address in a network is normally the
|
||||||
|
// gateway and should not be used except by devices forwarding packets
|
||||||
|
// between two administratively distinct networks (i.e. a router). This
|
||||||
|
// function does not discriminate against first usable vs "first address that
|
||||||
|
// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would
|
||||||
|
// return "2001:0db8::00011".
|
||||||
|
func (ipv6 IPv6Addr) FirstUsable() IPAddr {
|
||||||
|
return IPv6Addr{
|
||||||
|
Address: IPv6Address(ipv6.NetworkAddress()),
|
||||||
|
Mask: ipv6HostMask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Host returns a copy of ipv6 with its mask set to /128 so that it can be
|
||||||
|
// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
|
||||||
|
// ListenStreamArgs().
|
||||||
|
func (ipv6 IPv6Addr) Host() IPAddr {
|
||||||
|
// Nothing should listen on a broadcast address.
|
||||||
|
return IPv6Addr{
|
||||||
|
Address: ipv6.Address,
|
||||||
|
Mask: ipv6HostMask,
|
||||||
|
Port: ipv6.Port,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPPort returns the Port number attached to the IPv6Addr
|
||||||
|
func (ipv6 IPv6Addr) IPPort() IPPort {
|
||||||
|
return ipv6.Port
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsable returns the last address in a given network.
|
||||||
|
func (ipv6 IPv6Addr) LastUsable() IPAddr {
|
||||||
|
addr := new(big.Int)
|
||||||
|
addr.Set(ipv6.Address)
|
||||||
|
|
||||||
|
mask := new(big.Int)
|
||||||
|
mask.Set(ipv6.Mask)
|
||||||
|
|
||||||
|
negMask := new(big.Int)
|
||||||
|
negMask.Xor(ipv6HostMask, mask)
|
||||||
|
|
||||||
|
lastAddr := new(big.Int)
|
||||||
|
lastAddr.And(addr, mask)
|
||||||
|
lastAddr.Or(lastAddr, negMask)
|
||||||
|
|
||||||
|
return IPv6Addr{
|
||||||
|
Address: IPv6Address(lastAddr),
|
||||||
|
Mask: ipv6HostMask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenPacketArgs returns the arguments required to be passed to
|
||||||
|
// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs()
|
||||||
|
// will fail. See Host() to create an IPv6Addr with its mask set to /128.
|
||||||
|
func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) {
|
||||||
|
ipv6Mask := big.Int(*ipv6.Mask)
|
||||||
|
if ipv6Mask.Cmp(ipv6HostMask) != 0 {
|
||||||
|
return "udp6", ""
|
||||||
|
}
|
||||||
|
return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenStreamArgs returns the arguments required to be passed to
|
||||||
|
// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs()
|
||||||
|
// will fail. See Host() to create an IPv6Addr with its mask set to /128.
|
||||||
|
func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) {
|
||||||
|
ipv6Mask := big.Int(*ipv6.Mask)
|
||||||
|
if ipv6Mask.Cmp(ipv6HostMask) != 0 {
|
||||||
|
return "tcp6", ""
|
||||||
|
}
|
||||||
|
return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Maskbits returns the number of network mask bits in a given IPv6Addr. For
|
||||||
|
// example, the Maskbits() of "2001:0db8::0003/64" would return 64.
|
||||||
|
func (ipv6 IPv6Addr) Maskbits() int {
|
||||||
|
maskOnes, _ := ipv6.NetIPNet().Mask.Size()
|
||||||
|
|
||||||
|
return maskOnes
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on
|
||||||
|
// invalid input.
|
||||||
|
func MustIPv6Addr(addr string) IPv6Addr {
|
||||||
|
ipv6, err := NewIPv6Addr(addr)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err))
|
||||||
|
}
|
||||||
|
return ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetIP returns the address as a net.IP.
|
||||||
|
func (ipv6 IPv6Addr) NetIP() *net.IP {
|
||||||
|
return bigIntToNetIPv6(ipv6.Address)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetIPMask creates a new net.IPMask from the IPv6Addr.
|
||||||
|
func (ipv6 IPv6Addr) NetIPMask() *net.IPMask {
|
||||||
|
ipv6Mask := make(net.IPMask, IPv6len)
|
||||||
|
m := big.Int(*ipv6.Mask)
|
||||||
|
copy(ipv6Mask, m.Bytes())
|
||||||
|
return &ipv6Mask
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetIPNet returns a pointer to the net.IPNet within the IPv6Addr receiver.
|
||||||
|
func (ipv6 IPv6Addr) NetIPNet() *net.IPNet {
|
||||||
|
ipv6net := &net.IPNet{}
|
||||||
|
ipv6net.IP = make(net.IP, IPv6len)
|
||||||
|
copy(ipv6net.IP, *ipv6.NetIP())
|
||||||
|
ipv6net.Mask = *ipv6.NetIPMask()
|
||||||
|
return ipv6net
|
||||||
|
}
|
||||||
|
|
||||||
|
// Network returns the network prefix or network address for a given network.
|
||||||
|
func (ipv6 IPv6Addr) Network() IPAddr {
|
||||||
|
return IPv6Addr{
|
||||||
|
Address: IPv6Address(ipv6.NetworkAddress()),
|
||||||
|
Mask: ipv6.Mask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetworkAddress returns an IPv6Network of the IPv6Addr's network address.
|
||||||
|
func (ipv6 IPv6Addr) NetworkAddress() IPv6Network {
|
||||||
|
addr := new(big.Int)
|
||||||
|
addr.SetBytes((*ipv6.Address).Bytes())
|
||||||
|
|
||||||
|
mask := new(big.Int)
|
||||||
|
mask.SetBytes(*ipv6.NetIPMask())
|
||||||
|
|
||||||
|
netAddr := new(big.Int)
|
||||||
|
netAddr.And(addr, mask)
|
||||||
|
|
||||||
|
return IPv6Network(netAddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The
|
||||||
|
// order of the bytes is big endian.
|
||||||
|
func (ipv6 IPv6Addr) Octets() []int {
|
||||||
|
x := make([]int, IPv6len)
|
||||||
|
for i, b := range *bigIntToNetIPv6(ipv6.Address) {
|
||||||
|
x[i] = int(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of the IPv6Addr
|
||||||
|
func (ipv6 IPv6Addr) String() string {
|
||||||
|
if ipv6.Port != 0 {
|
||||||
|
return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv6.Maskbits() == 128 {
|
||||||
|
return ipv6.NetIP().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type is used as a type switch and returns TypeIPv6
|
||||||
|
func (IPv6Addr) Type() SockAddrType {
|
||||||
|
return TypeIPv6
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv6Attrs returns a list of attributes supported by the IPv6Addr type
|
||||||
|
func IPv6Attrs() []AttrName {
|
||||||
|
return ipv6AddrAttrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv6AddrAttr returns a string representation of an attribute for the given
|
||||||
|
// IPv6Addr.
|
||||||
|
func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string {
|
||||||
|
fn, found := ipv6AddrAttrMap[selector]
|
||||||
|
if !found {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(ipv6)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ipv6AddrInit is called once at init()
|
||||||
|
func ipv6AddrInit() {
|
||||||
|
// Sorted for human readability
|
||||||
|
ipv6AddrAttrs = []AttrName{
|
||||||
|
"size", // Same position as in IPv6 for output consistency
|
||||||
|
"uint128",
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{
|
||||||
|
"size": func(ipv6 IPv6Addr) string {
|
||||||
|
netSize := big.NewInt(1)
|
||||||
|
netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits()))
|
||||||
|
return netSize.Text(10)
|
||||||
|
},
|
||||||
|
"uint128": func(ipv6 IPv6Addr) string {
|
||||||
|
b := big.Int(*ipv6.Address)
|
||||||
|
return b.Text(10)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
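// Editor's sketch (not part of the vendored file): the IPv6 attributes registered
// above. Assumes the package is imported as `sockaddr` with "fmt".
func exampleIPv6Attrs() {
	sa := sockaddr.MustIPv6Addr("2001:db8::/32")
	fmt.Println(sockaddr.IPv6AddrAttr(sa, "size"))    // 2^96 as a decimal string
	fmt.Println(sockaddr.IPv6AddrAttr(sa, "uint128")) // the address as a decimal string
}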
|
||||||
|
|
||||||
|
// bigIntToNetIPv6 is a helper function that correctly returns a net.IP with the
|
||||||
|
// correctly padded values.
|
||||||
|
func bigIntToNetIPv6(bi *big.Int) *net.IP {
|
||||||
|
x := make(net.IP, IPv6len)
|
||||||
|
ipv6Bytes := bi.Bytes()
|
||||||
|
|
||||||
|
// It's possible for ipv6Bytes to be less than IPv6len bytes in size. If
|
||||||
|
// they are different sizes we need to pad the response to the full size.
|
||||||
|
if len(ipv6Bytes) < IPv6len {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
buf.Grow(IPv6len)
|
||||||
|
|
||||||
|
for i := len(ipv6Bytes); i < IPv6len; i++ {
|
||||||
|
if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, b := range ipv6Bytes {
|
||||||
|
if err := binary.Write(buf, binary.BigEndian, b); err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6Bytes = buf.Bytes()
|
||||||
|
}
|
||||||
|
i := copy(x, ipv6Bytes)
|
||||||
|
if i != IPv6len {
|
||||||
|
panic("IPv6 wrong size")
|
||||||
|
}
|
||||||
|
return &x
|
||||||
|
}
|
948
vendor/github.com/hashicorp/go-sockaddr/rfc.go
generated
vendored
Normal file
|
@ -0,0 +1,948 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP
|
||||||
|
// blocks.
|
||||||
|
const ForwardingBlacklist = 4294967295
|
||||||
|
const ForwardingBlacklistRFC = "4294967295"
|
||||||
|
|
||||||
|
// IsRFC tests to see if an SockAddr matches the specified RFC
|
||||||
|
func IsRFC(rfcNum uint, sa SockAddr) bool {
|
||||||
|
rfcNetMap := KnownRFCs()
|
||||||
|
rfcNets, ok := rfcNetMap[rfcNum]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var contained bool
|
||||||
|
for _, rfcNet := range rfcNets {
|
||||||
|
if rfcNet.Contains(sa) {
|
||||||
|
contained = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return contained
|
||||||
|
}
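// Editor's sketch (not part of the vendored file): IsRFC() checks membership in the
// blocks returned by KnownRFCs() below. Assumes the package is imported as
// `sockaddr` with "fmt".
func exampleIsRFC() {
	fmt.Println(sockaddr.IsRFC(1918, sockaddr.MustIPv4Addr("10.1.2.3/32"))) // true  (10.0.0.0/8)
	fmt.Println(sockaddr.IsRFC(4193, sockaddr.MustIPv6Addr("fd00::1/128"))) // true  (fc00::/7)
	fmt.Println(sockaddr.IsRFC(1918, sockaddr.MustIPv4Addr("8.8.8.8/32")))  // false
}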
|
||||||
|
|
||||||
|
// KnownRFCs returns an initial set of known RFCs.
|
||||||
|
//
|
||||||
|
// NOTE (sean@): As this list evolves over time, please submit patches to keep
|
||||||
|
// this list current. If something isn't right, inquire, as it may just be a
|
||||||
|
// bug on my part. Some of the inclusions were based on my judgement as to what
|
||||||
|
// would be a useful value (e.g. RFC3330).
|
||||||
|
//
|
||||||
|
// Useful resources:
|
||||||
|
//
|
||||||
|
// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
|
||||||
|
// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml
|
||||||
|
// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
|
||||||
|
func KnownRFCs() map[uint]SockAddrs {
|
||||||
|
// NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a
|
||||||
|
// RADIX tree, but `ENOTIME`. Patches welcome.
|
||||||
|
return map[uint]SockAddrs{
|
||||||
|
919: {
|
||||||
|
// [RFC919] Broadcasting Internet Datagrams
|
||||||
|
MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards
|
||||||
|
},
|
||||||
|
1122: {
|
||||||
|
// [RFC1122] Requirements for Internet Hosts -- Communication Layers
|
||||||
|
MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3
|
||||||
|
MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3
|
||||||
|
},
|
||||||
|
1112: {
|
||||||
|
// [RFC1112] Host Extensions for IP Multicasting
|
||||||
|
MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses
|
||||||
|
},
|
||||||
|
1918: {
|
||||||
|
// [RFC1918] Address Allocation for Private Internets
|
||||||
|
MustIPv4Addr("10.0.0.0/8"),
|
||||||
|
MustIPv4Addr("172.16.0.0/12"),
|
||||||
|
MustIPv4Addr("192.168.0.0/16"),
|
||||||
|
},
|
||||||
|
2544: {
|
||||||
|
// [RFC2544] Benchmarking Methodology for Network
|
||||||
|
// Interconnect Devices
|
||||||
|
MustIPv4Addr("198.18.0.0/15"),
|
||||||
|
},
|
||||||
|
2765: {
|
||||||
|
// [RFC2765] Stateless IP/ICMP Translation Algorithm
|
||||||
|
// (SIIT) (obsoleted by RFC 6145, which itself was
|
||||||
|
// later obsoleted by 7915).
|
||||||
|
|
||||||
|
// [RFC2765], §2.1 Addresses
|
||||||
|
MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"),
|
||||||
|
},
|
||||||
|
2928: {
|
||||||
|
// [RFC2928] Initial IPv6 Sub-TLA ID Assignments
|
||||||
|
MustIPv6Addr("2001::/16"), // Superblock
|
||||||
|
//MustIPv6Addr("2001:0000::/23"), // IANA
|
||||||
|
//MustIPv6Addr("2001:0200::/23"), // APNIC
|
||||||
|
//MustIPv6Addr("2001:0400::/23"), // ARIN
|
||||||
|
//MustIPv6Addr("2001:0600::/23"), // RIPE NCC
|
||||||
|
//MustIPv6Addr("2001:0800::/23"), // (future assignment)
|
||||||
|
// ...
|
||||||
|
//MustIPv6Addr("2001:FE00::/23"), // (future assignment)
|
||||||
|
},
|
||||||
|
3056: { // 6to4 address
|
||||||
|
// [RFC3056] Connection of IPv6 Domains via IPv4 Clouds
|
||||||
|
|
||||||
|
// [RFC3056], §2 IPv6 Prefix Allocation
|
||||||
|
MustIPv6Addr("2002::/16"),
|
||||||
|
},
|
||||||
|
3068: {
|
||||||
|
// [RFC3068] An Anycast Prefix for 6to4 Relay Routers
|
||||||
|
// (obsoleted by RFC7526)
|
||||||
|
|
||||||
|
// [RFC3068], § 6to4 Relay anycast address
|
||||||
|
MustIPv4Addr("192.88.99.0/24"),
|
||||||
|
|
||||||
|
// [RFC3068], §2.5 6to4 IPv6 relay anycast address
|
||||||
|
//
|
||||||
|
// NOTE: /120 == 128-(32-24)
|
||||||
|
MustIPv6Addr("2002:c058:6301::/120"),
|
||||||
|
},
|
||||||
|
3171: {
|
||||||
|
// [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments
|
||||||
|
MustIPv4Addr("224.0.0.0/4"),
|
||||||
|
},
|
||||||
|
3330: {
|
||||||
|
// [RFC3330] Special-Use IPv4 Addresses
|
||||||
|
|
||||||
|
// Addresses in this block refer to source hosts on
|
||||||
|
// "this" network. Address 0.0.0.0/32 may be used as a
|
||||||
|
// source address for this host on this network; other
|
||||||
|
// addresses within 0.0.0.0/8 may be used to refer to
|
||||||
|
// specified hosts on this network [RFC1700, page 4].
|
||||||
|
MustIPv4Addr("0.0.0.0/8"),
|
||||||
|
|
||||||
|
// 10.0.0.0/8 - This block is set aside for use in
|
||||||
|
// private networks. Its intended use is documented in
|
||||||
|
// [RFC1918]. Addresses within this block should not
|
||||||
|
// appear on the public Internet.
|
||||||
|
MustIPv4Addr("10.0.0.0/8"),
|
||||||
|
|
||||||
|
// 14.0.0.0/8 - This block is set aside for assignments
|
||||||
|
// to the international system of Public Data Networks
|
||||||
|
// [RFC1700, page 181]. The registry of assignments
|
||||||
|
// within this block can be accessed from the "Public
|
||||||
|
// Data Network Numbers" link on the web page at
|
||||||
|
// http://www.iana.org/numbers.html. Addresses within
|
||||||
|
// this block are assigned to users and should be
|
||||||
|
// treated as such.
|
||||||
|
|
||||||
|
// 24.0.0.0/8 - This block was allocated in early 1996
|
||||||
|
// for use in provisioning IP service over cable
|
||||||
|
// television systems. Although the IANA initially was
|
||||||
|
// involved in making assignments to cable operators,
|
||||||
|
// this responsibility was transferred to American
|
||||||
|
// Registry for Internet Numbers (ARIN) in May 2001.
|
||||||
|
// Addresses within this block are assigned in the
|
||||||
|
// normal manner and should be treated as such.
|
||||||
|
|
||||||
|
// 39.0.0.0/8 - This block was used in the "Class A
|
||||||
|
// Subnet Experiment" that commenced in May 1995, as
|
||||||
|
// documented in [RFC1797]. The experiment has been
|
||||||
|
// completed and this block has been returned to the
|
||||||
|
// pool of addresses reserved for future allocation or
|
||||||
|
// assignment. This block therefore no longer has a
|
||||||
|
// special use and is subject to allocation to a
|
||||||
|
// Regional Internet Registry for assignment in the
|
||||||
|
// normal manner.
|
||||||
|
|
||||||
|
// 127.0.0.0/8 - This block is assigned for use as the Internet host
|
||||||
|
// loopback address. A datagram sent by a higher level protocol to an
|
||||||
|
// address anywhere within this block should loop back inside the host.
|
||||||
|
// This is ordinarily implemented using only 127.0.0.1/32 for loopback,
|
||||||
|
// but no addresses within this block should ever appear on any network
|
||||||
|
// anywhere [RFC1700, page 5].
|
||||||
|
MustIPv4Addr("127.0.0.0/8"),
|
||||||
|
|
||||||
|
// 128.0.0.0/16 - This block, corresponding to the
|
||||||
|
// numerically lowest of the former Class B addresses,
|
||||||
|
// was initially and is still reserved by the IANA.
|
||||||
|
// Given the present classless nature of the IP address
|
||||||
|
// space, the basis for the reservation no longer
|
||||||
|
// applies and addresses in this block are subject to
|
||||||
|
// future allocation to a Regional Internet Registry for
|
||||||
|
// assignment in the normal manner.
|
||||||
|
|
||||||
|
// 169.254.0.0/16 - This is the "link local" block. It
|
||||||
|
// is allocated for communication between hosts on a
|
||||||
|
// single link. Hosts obtain these addresses by
|
||||||
|
// auto-configuration, such as when a DHCP server may
|
||||||
|
// not be found.
|
||||||
|
MustIPv4Addr("169.254.0.0/16"),
|
||||||
|
|
||||||
|
// 172.16.0.0/12 - This block is set aside for use in
|
||||||
|
// private networks. Its intended use is documented in
|
||||||
|
// [RFC1918]. Addresses within this block should not
|
||||||
|
// appear on the public Internet.
|
||||||
|
MustIPv4Addr("172.16.0.0/12"),
|
||||||
|
|
||||||
|
// 191.255.0.0/16 - This block, corresponding to the numerically highest
|
||||||
|
// to the former Class B addresses, was initially and is still reserved
|
||||||
|
// by the IANA. Given the present classless nature of the IP address
|
||||||
|
// space, the basis for the reservation no longer applies and addresses
|
||||||
|
// in this block are subject to future allocation to a Regional Internet
|
||||||
|
// Registry for assignment in the normal manner.
|
||||||
|
|
||||||
|
// 192.0.0.0/24 - This block, corresponding to the
|
||||||
|
// numerically lowest of the former Class C addresses,
|
||||||
|
// was initially and is still reserved by the IANA.
|
||||||
|
// Given the present classless nature of the IP address
|
||||||
|
// space, the basis for the reservation no longer
|
||||||
|
// applies and addresses in this block are subject to
|
||||||
|
// future allocation to a Regional Internet Registry for
|
||||||
|
// assignment in the normal manner.
|
||||||
|
|
||||||
|
// 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in
|
||||||
|
// documentation and example code. It is often used in conjunction with
|
||||||
|
// domain names example.com or example.net in vendor and protocol
|
||||||
|
// documentation. Addresses within this block should not appear on the
|
||||||
|
// public Internet.
|
||||||
|
MustIPv4Addr("192.0.2.0/24"),
|
||||||
|
|
||||||
|
// 192.88.99.0/24 - This block is allocated for use as 6to4 relay
|
||||||
|
// anycast addresses, according to [RFC3068].
|
||||||
|
MustIPv4Addr("192.88.99.0/24"),
|
||||||
|
|
||||||
|
// 192.168.0.0/16 - This block is set aside for use in private networks.
|
||||||
|
// Its intended use is documented in [RFC1918]. Addresses within this
|
||||||
|
// block should not appear on the public Internet.
|
||||||
|
MustIPv4Addr("192.168.0.0/16"),
|
||||||
|
|
||||||
|
// 198.18.0.0/15 - This block has been allocated for use
|
||||||
|
// in benchmark tests of network interconnect devices.
|
||||||
|
// Its use is documented in [RFC2544].
|
||||||
|
MustIPv4Addr("198.18.0.0/15"),
|
||||||
|
|
||||||
|
// 223.255.255.0/24 - This block, corresponding to the
|
||||||
|
// numerically highest of the former Class C addresses,
|
||||||
|
// was initially and is still reserved by the IANA.
|
||||||
|
// Given the present classless nature of the IP address
|
||||||
|
// space, the basis for the reservation no longer
|
||||||
|
// applies and addresses in this block are subject to
|
||||||
|
// future allocation to a Regional Internet Registry for
|
||||||
|
// assignment in the normal manner.
|
||||||
|
|
||||||
|
// 224.0.0.0/4 - This block, formerly known as the Class
|
||||||
|
// D address space, is allocated for use in IPv4
|
||||||
|
// multicast address assignments. The IANA guidelines
|
||||||
|
// for assignments from this space are described in
|
||||||
|
// [RFC3171].
|
||||||
|
MustIPv4Addr("224.0.0.0/4"),
|
||||||
|
|
||||||
|
// 240.0.0.0/4 - This block, formerly known as the Class E address
|
||||||
|
// space, is reserved. The "limited broadcast" destination address
|
||||||
|
// 255.255.255.255 should never be forwarded outside the (sub-)net of
|
||||||
|
// the source. The remainder of this space is reserved
|
||||||
|
// for future use. [RFC1700, page 4]
|
||||||
|
MustIPv4Addr("240.0.0.0/4"),
|
||||||
|
},
|
||||||
|
3849: {
|
||||||
|
// [RFC3849] IPv6 Address Prefix Reserved for Documentation
|
||||||
|
MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations
|
||||||
|
},
|
||||||
|
3927: {
|
||||||
|
// [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses
|
||||||
|
MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection
|
||||||
|
},
|
||||||
|
4038: {
|
||||||
|
// [RFC4038] Application Aspects of IPv6 Transition
|
||||||
|
|
||||||
|
// [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node
|
||||||
|
MustIPv6Addr("0:0:0:0:0:ffff::/96"),
|
||||||
|
},
|
||||||
|
4193: {
|
||||||
|
// [RFC4193] Unique Local IPv6 Unicast Addresses
|
||||||
|
MustIPv6Addr("fc00::/7"),
|
||||||
|
},
|
||||||
|
4291: {
|
||||||
|
// [RFC4291] IP Version 6 Addressing Architecture
|
||||||
|
|
||||||
|
// [RFC4291], §2.5.2 The Unspecified Address
|
||||||
|
MustIPv6Addr("::/128"),
|
||||||
|
|
||||||
|
// [RFC4291], §2.5.3 The Loopback Address
|
||||||
|
MustIPv6Addr("::1/128"),
|
||||||
|
|
||||||
|
// [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address
|
||||||
|
MustIPv6Addr("::/96"),
|
||||||
|
|
||||||
|
// [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address
|
||||||
|
MustIPv6Addr("::ffff:0:0/96"),
|
||||||
|
|
||||||
|
// [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses
|
||||||
|
MustIPv6Addr("fe80::/10"),
|
||||||
|
|
||||||
|
// [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses
|
||||||
|
// (deprecated)
|
||||||
|
MustIPv6Addr("fec0::/10"),
|
||||||
|
|
||||||
|
// [RFC4291], §2.7 Multicast Addresses
|
||||||
|
MustIPv6Addr("ff00::/8"),
|
||||||
|
|
||||||
|
// IPv6 Multicast Information.
|
||||||
|
//
|
||||||
|
// In the following "table" below, `ff0x` is replaced
|
||||||
|
// with the following values depending on the scope of
|
||||||
|
// the query:
|
||||||
|
//
|
||||||
|
// IPv6 Multicast Scopes:
|
||||||
|
// * ff00/9 // reserved
|
||||||
|
// * ff01/9 // interface-local
|
||||||
|
// * ff02/9 // link-local
|
||||||
|
// * ff03/9 // realm-local
|
||||||
|
// * ff04/9 // admin-local
|
||||||
|
// * ff05/9 // site-local
|
||||||
|
// * ff08/9 // organization-local
|
||||||
|
// * ff0e/9 // global
|
||||||
|
// * ff0f/9 // reserved
|
||||||
|
//
|
||||||
|
// IPv6 Multicast Addresses:
|
||||||
|
// * ff0x::2 // All routers
|
||||||
|
// * ff02::5 // OSPFIGP
|
||||||
|
// * ff02::6 // OSPFIGP Designated Routers
|
||||||
|
// * ff02::9 // RIP Routers
|
||||||
|
// * ff02::a // EIGRP Routers
|
||||||
|
// * ff02::d // All PIM Routers
|
||||||
|
// * ff02::1a // All RPL Routers
|
||||||
|
// * ff0x::fb // mDNSv6
|
||||||
|
// * ff0x::101 // All Network Time Protocol (NTP) servers
|
||||||
|
// * ff02::1:1 // Link Name
|
||||||
|
// * ff02::1:2 // All-dhcp-agents
|
||||||
|
// * ff02::1:3 // Link-local Multicast Name Resolution
|
||||||
|
// * ff05::1:3 // All-dhcp-servers
|
||||||
|
// * ff02::1:ff00:0/104 // Solicited-node multicast address.
|
||||||
|
// * ff02::2:ff00:0/104 // Node Information Queries
|
||||||
|
},
|
||||||
|
4380: {
|
||||||
|
// [RFC4380] Teredo: Tunneling IPv6 over UDP through
|
||||||
|
// Network Address Translations (NATs)
|
||||||
|
|
||||||
|
// [RFC4380], §2.6 Global Teredo IPv6 Service Prefix
|
||||||
|
MustIPv6Addr("2001:0000::/32"),
|
||||||
|
},
|
||||||
|
4773: {
|
||||||
|
// [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block
|
||||||
|
MustIPv6Addr("2001:0000::/23"), // IANA
|
||||||
|
},
|
||||||
|
4843: {
|
||||||
|
// [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID)
|
||||||
|
MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations
|
||||||
|
},
|
||||||
|
5180: {
|
||||||
|
// [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices
|
||||||
|
MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations
|
||||||
|
},
|
||||||
|
5735: {
|
||||||
|
// [RFC5735] Special Use IPv4 Addresses
|
||||||
|
MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1
|
||||||
|
MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
|
||||||
|
MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3
|
||||||
|
MustIPv4Addr("198.18.0.0/15"), // Benchmarks
|
||||||
|
},
|
||||||
|
5737: {
|
||||||
|
// [RFC5737] IPv4 Address Blocks Reserved for Documentation
|
||||||
|
MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1
|
||||||
|
MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
|
||||||
|
MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3
|
||||||
|
},
|
||||||
|
6052: {
|
||||||
|
// [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators
|
||||||
|
MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix
|
||||||
|
},
|
||||||
|
6333: {
|
||||||
|
// [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion
|
||||||
|
MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address
|
||||||
|
},
|
||||||
|
6598: {
|
||||||
|
// [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space
|
||||||
|
MustIPv4Addr("100.64.0.0/10"),
|
||||||
|
},
|
||||||
|
6666: {
|
||||||
|
// [RFC6666] A Discard Prefix for IPv6
|
||||||
|
MustIPv6Addr("0100::/64"),
|
||||||
|
},
|
||||||
|
6890: {
|
||||||
|
// [RFC6890] Special-Purpose IP Address Registries
|
||||||
|
|
||||||
|
// From "RFC6890 §2.2.1 Information Requirements":
|
||||||
|
/*
|
||||||
|
The IPv4 and IPv6 Special-Purpose Address Registries maintain the
|
||||||
|
following information regarding each entry:
|
||||||
|
|
||||||
|
o Address Block - A block of IPv4 or IPv6 addresses that has been
|
||||||
|
registered for a special purpose.
|
||||||
|
|
||||||
|
o Name - A descriptive name for the special-purpose address block.
|
||||||
|
|
||||||
|
o RFC - The RFC through which the special-purpose address block was
|
||||||
|
requested.
|
||||||
|
|
||||||
|
o Allocation Date - The date upon which the special-purpose address
|
||||||
|
block was allocated.
|
||||||
|
|
||||||
|
o Termination Date - The date upon which the allocation is to be
|
||||||
|
terminated. This field is applicable for limited-use allocations
|
||||||
|
only.
|
||||||
|
|
||||||
|
o Source - A boolean value indicating whether an address from the
|
||||||
|
allocated special-purpose address block is valid when used as the
|
||||||
|
source address of an IP datagram that transits two devices.
|
||||||
|
|
||||||
|
o Destination - A boolean value indicating whether an address from
|
||||||
|
the allocated special-purpose address block is valid when used as
|
||||||
|
the destination address of an IP datagram that transits two
|
||||||
|
devices.
|
||||||
|
|
||||||
|
o Forwardable - A boolean value indicating whether a router may
|
||||||
|
forward an IP datagram whose destination address is drawn from the
|
||||||
|
allocated special-purpose address block between external
|
||||||
|
interfaces.
|
||||||
|
|
||||||
|
o Global - A boolean value indicating whether an IP datagram whose
|
||||||
|
destination address is drawn from the allocated special-purpose
|
||||||
|
address block is forwardable beyond a specified administrative
|
||||||
|
domain.
|
||||||
|
|
||||||
|
o Reserved-by-Protocol - A boolean value indicating whether the
|
||||||
|
special-purpose address block is reserved by IP, itself. This
|
||||||
|
value is "TRUE" if the RFC that created the special-purpose
|
||||||
|
address block requires all compliant IP implementations to behave
|
||||||
|
in a special way when processing packets either to or from
|
||||||
|
addresses contained by the address block.
|
||||||
|
|
||||||
|
If the value of "Destination" is FALSE, the values of "Forwardable"
|
||||||
|
and "Global" must also be false.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*+----------------------+----------------------------+
|
||||||
|
* | Attribute | Value |
|
||||||
|
* +----------------------+----------------------------+
|
||||||
|
* | Address Block | 0.0.0.0/8 |
|
||||||
|
* | Name | "This host on this network"|
|
||||||
|
* | RFC | [RFC1122], Section 3.2.1.3 |
|
||||||
|
* | Allocation Date | September 1981 |
|
||||||
|
* | Termination Date | N/A |
|
||||||
|
* | Source | True |
|
||||||
|
* | Destination | False |
|
||||||
|
* | Forwardable | False |
|
||||||
|
* | Global | False |
|
||||||
|
* | Reserved-by-Protocol | True |
|
||||||
|
* +----------------------+----------------------------+*/
|
||||||
|
MustIPv4Addr("0.0.0.0/8"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------+
|
||||||
|
* | Attribute | Value |
|
||||||
|
* +----------------------+---------------+
|
||||||
|
* | Address Block | 10.0.0.0/8 |
|
||||||
|
* | Name | Private-Use |
|
||||||
|
* | RFC | [RFC1918] |
|
||||||
|
* | Allocation Date | February 1996 |
|
||||||
|
* | Termination Date | N/A |
|
||||||
|
* | Source | True |
|
||||||
|
* | Destination | True |
|
||||||
|
* | Forwardable | True |
|
||||||
|
* | Global | False |
|
||||||
|
* | Reserved-by-Protocol | False |
|
||||||
|
* +----------------------+---------------+ */
|
||||||
|
MustIPv4Addr("10.0.0.0/8"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------+
|
||||||
|
| Address Block | 100.64.0.0/10 |
|
||||||
|
| Name | Shared Address Space |
|
||||||
|
| RFC | [RFC6598] |
|
||||||
|
| Allocation Date | April 2012 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------------+*/
|
||||||
|
MustIPv4Addr("100.64.0.0/10"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------------+
|
||||||
|
| Address Block | 127.0.0.0/8 |
|
||||||
|
| Name | Loopback |
|
||||||
|
| RFC | [RFC1122], Section 3.2.1.3 |
|
||||||
|
| Allocation Date | September 1981 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False [1] |
|
||||||
|
| Destination | False [1] |
|
||||||
|
| Forwardable | False [1] |
|
||||||
|
| Global | False [1] |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+----------------------------+*/
|
||||||
|
// [1] Several protocols have been granted exceptions to
|
||||||
|
// this rule. For examples, see [RFC4379] and
|
||||||
|
// [RFC5884].
|
||||||
|
MustIPv4Addr("127.0.0.0/8"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------+
|
||||||
|
| Address Block | 169.254.0.0/16 |
|
||||||
|
| Name | Link Local |
|
||||||
|
| RFC | [RFC3927] |
|
||||||
|
| Allocation Date | May 2005 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+----------------+*/
|
||||||
|
MustIPv4Addr("169.254.0.0/16"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------+
|
||||||
|
| Address Block | 172.16.0.0/12 |
|
||||||
|
| Name | Private-Use |
|
||||||
|
| RFC | [RFC1918] |
|
||||||
|
| Allocation Date | February 1996 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------+*/
|
||||||
|
MustIPv4Addr("172.16.0.0/12"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------------------------+
|
||||||
|
| Address Block | 192.0.0.0/24 [2] |
|
||||||
|
| Name | IETF Protocol Assignments |
|
||||||
|
| RFC | Section 2.1 of this document |
|
||||||
|
| Allocation Date | January 2010 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------------------------+*/
|
||||||
|
// [2] Not usable unless by virtue of a more specific
|
||||||
|
// reservation.
|
||||||
|
MustIPv4Addr("192.0.0.0/24"),
|
||||||
|
|
||||||
|
/*+----------------------+--------------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+--------------------------------+
|
||||||
|
| Address Block | 192.0.0.0/29 |
|
||||||
|
| Name | IPv4 Service Continuity Prefix |
|
||||||
|
| RFC | [RFC6333], [RFC7335] |
|
||||||
|
| Allocation Date | June 2011 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+--------------------------------+*/
|
||||||
|
MustIPv4Addr("192.0.0.0/29"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------------+
|
||||||
|
| Address Block | 192.0.2.0/24 |
|
||||||
|
| Name | Documentation (TEST-NET-1) |
|
||||||
|
| RFC | [RFC5737] |
|
||||||
|
| Allocation Date | January 2010 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------------------+*/
|
||||||
|
MustIPv4Addr("192.0.2.0/24"),
|
||||||
|
|
||||||
|
/*+----------------------+--------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+--------------------+
|
||||||
|
| Address Block | 192.88.99.0/24 |
|
||||||
|
| Name | 6to4 Relay Anycast |
|
||||||
|
| RFC | [RFC3068] |
|
||||||
|
| Allocation Date | June 2001 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | True |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+--------------------+*/
|
||||||
|
MustIPv4Addr("192.88.99.0/24"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------+
|
||||||
|
| Address Block | 192.168.0.0/16 |
|
||||||
|
| Name | Private-Use |
|
||||||
|
| RFC | [RFC1918] |
|
||||||
|
| Allocation Date | February 1996 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------+*/
|
||||||
|
MustIPv4Addr("192.168.0.0/16"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------+
|
||||||
|
| Address Block | 198.18.0.0/15 |
|
||||||
|
| Name | Benchmarking |
|
||||||
|
| RFC | [RFC2544] |
|
||||||
|
| Allocation Date | March 1999 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------+*/
|
||||||
|
MustIPv4Addr("198.18.0.0/15"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------------+
|
||||||
|
| Address Block | 198.51.100.0/24 |
|
||||||
|
| Name | Documentation (TEST-NET-2) |
|
||||||
|
| RFC | [RFC5737] |
|
||||||
|
| Allocation Date | January 2010 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------------------+*/
|
||||||
|
MustIPv4Addr("198.51.100.0/24"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------------+
|
||||||
|
| Address Block | 203.0.113.0/24 |
|
||||||
|
| Name | Documentation (TEST-NET-3) |
|
||||||
|
| RFC | [RFC5737] |
|
||||||
|
| Allocation Date | January 2010 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------------------+*/
|
||||||
|
MustIPv4Addr("203.0.113.0/24"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------+
|
||||||
|
| Address Block | 240.0.0.0/4 |
|
||||||
|
| Name | Reserved |
|
||||||
|
| RFC | [RFC1112], Section 4 |
|
||||||
|
| Allocation Date | August 1989 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+----------------------+*/
|
||||||
|
MustIPv4Addr("240.0.0.0/4"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------+
|
||||||
|
| Address Block | 255.255.255.255/32 |
|
||||||
|
| Name | Limited Broadcast |
|
||||||
|
| RFC | [RFC0919], Section 7 |
|
||||||
|
| Allocation Date | October 1984 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------------+*/
|
||||||
|
MustIPv4Addr("255.255.255.255/32"),
|
||||||
|
|
||||||
|
/*+----------------------+------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+------------------+
|
||||||
|
| Address Block | ::1/128 |
|
||||||
|
| Name | Loopback Address |
|
||||||
|
| RFC | [RFC4291] |
|
||||||
|
| Allocation Date | February 2006 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+------------------+*/
|
||||||
|
MustIPv6Addr("::1/128"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------------+
|
||||||
|
| Address Block | ::/128 |
|
||||||
|
| Name | Unspecified Address |
|
||||||
|
| RFC | [RFC4291] |
|
||||||
|
| Allocation Date | February 2006 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+---------------------+*/
|
||||||
|
MustIPv6Addr("::/128"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------------+
|
||||||
|
| Address Block | 64:ff9b::/96 |
|
||||||
|
| Name | IPv4-IPv6 Translat. |
|
||||||
|
| RFC | [RFC6052] |
|
||||||
|
| Allocation Date | October 2010 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | True |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------------+*/
|
||||||
|
MustIPv6Addr("64:ff9b::/96"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------------+
|
||||||
|
| Address Block | ::ffff:0:0/96 |
|
||||||
|
| Name | IPv4-mapped Address |
|
||||||
|
| RFC | [RFC4291] |
|
||||||
|
| Allocation Date | February 2006 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+---------------------+*/
|
||||||
|
MustIPv6Addr("::ffff:0:0/96"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------------------+
|
||||||
|
| Address Block | 100::/64 |
|
||||||
|
| Name | Discard-Only Address Block |
|
||||||
|
| RFC | [RFC6666] |
|
||||||
|
| Allocation Date | June 2012 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------------------+*/
|
||||||
|
MustIPv6Addr("100::/64"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------------------+
|
||||||
|
| Address Block | 2001::/23 |
|
||||||
|
| Name | IETF Protocol Assignments |
|
||||||
|
| RFC | [RFC2928] |
|
||||||
|
| Allocation Date | September 2000 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False[1] |
|
||||||
|
| Destination | False[1] |
|
||||||
|
| Forwardable | False[1] |
|
||||||
|
| Global | False[1] |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------------------+*/
|
||||||
|
// [1] Unless allowed by a more specific allocation.
|
||||||
|
MustIPv6Addr("2001::/16"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------+
|
||||||
|
| Address Block | 2001::/32 |
|
||||||
|
| Name | TEREDO |
|
||||||
|
| RFC | [RFC4380] |
|
||||||
|
| Allocation Date | January 2006 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------+*/
|
||||||
|
// Covered by previous entry, included for completeness.
|
||||||
|
//
|
||||||
|
// MustIPv6Addr("2001::/16"),
|
||||||
|
|
||||||
|
/*+----------------------+----------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+----------------+
|
||||||
|
| Address Block | 2001:2::/48 |
|
||||||
|
| Name | Benchmarking |
|
||||||
|
| RFC | [RFC5180] |
|
||||||
|
| Allocation Date | April 2008 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+----------------+*/
|
||||||
|
// Covered by previous entry, included for completeness.
|
||||||
|
//
|
||||||
|
// MustIPv6Addr("2001:2::/48"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------+
|
||||||
|
| Address Block | 2001:db8::/32 |
|
||||||
|
| Name | Documentation |
|
||||||
|
| RFC | [RFC3849] |
|
||||||
|
| Allocation Date | July 2004 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------+*/
|
||||||
|
// Covered by previous entry, included for completeness.
|
||||||
|
//
|
||||||
|
// MustIPv6Addr("2001:db8::/32"),
|
||||||
|
|
||||||
|
/*+----------------------+--------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+--------------+
|
||||||
|
| Address Block | 2001:10::/28 |
|
||||||
|
| Name | ORCHID |
|
||||||
|
| RFC | [RFC4843] |
|
||||||
|
| Allocation Date | March 2007 |
|
||||||
|
| Termination Date | March 2014 |
|
||||||
|
| Source | False |
|
||||||
|
| Destination | False |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+--------------+*/
|
||||||
|
// Covered by previous entry, included for completeness.
|
||||||
|
//
|
||||||
|
// MustIPv6Addr("2001:10::/28"),
|
||||||
|
|
||||||
|
/*+----------------------+---------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+---------------+
|
||||||
|
| Address Block | 2002::/16 [2] |
|
||||||
|
| Name | 6to4 |
|
||||||
|
| RFC | [RFC3056] |
|
||||||
|
| Allocation Date | February 2001 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | N/A [2] |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+---------------+*/
|
||||||
|
// [2] See [RFC3056] for details.
|
||||||
|
MustIPv6Addr("2002::/16"),
|
||||||
|
|
||||||
|
/*+----------------------+--------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+--------------+
|
||||||
|
| Address Block | fc00::/7 |
|
||||||
|
| Name | Unique-Local |
|
||||||
|
| RFC | [RFC4193] |
|
||||||
|
| Allocation Date | October 2005 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | True |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | False |
|
||||||
|
+----------------------+--------------+*/
|
||||||
|
MustIPv6Addr("fc00::/7"),
|
||||||
|
|
||||||
|
/*+----------------------+-----------------------+
|
||||||
|
| Attribute | Value |
|
||||||
|
+----------------------+-----------------------+
|
||||||
|
| Address Block | fe80::/10 |
|
||||||
|
| Name | Linked-Scoped Unicast |
|
||||||
|
| RFC | [RFC4291] |
|
||||||
|
| Allocation Date | February 2006 |
|
||||||
|
| Termination Date | N/A |
|
||||||
|
| Source | True |
|
||||||
|
| Destination | True |
|
||||||
|
| Forwardable | False |
|
||||||
|
| Global | False |
|
||||||
|
| Reserved-by-Protocol | True |
|
||||||
|
+----------------------+-----------------------+*/
|
||||||
|
MustIPv6Addr("fe80::/10"),
|
||||||
|
},
|
||||||
|
7335: {
|
||||||
|
// [RFC7335] IPv4 Service Continuity Prefix
|
||||||
|
MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations
|
||||||
|
},
|
||||||
|
ForwardingBlacklist: { // Pseudo-RFC
|
||||||
|
// Blacklist of non-forwardable IP blocks taken from RFC6890
|
||||||
|
//
|
||||||
|
// TODO: the attributes for forwardable should be
|
||||||
|
// searchable and embedded in the main list of RFCs
|
||||||
|
// above.
|
||||||
|
MustIPv4Addr("0.0.0.0/8"),
|
||||||
|
MustIPv4Addr("127.0.0.0/8"),
|
||||||
|
MustIPv4Addr("169.254.0.0/16"),
|
||||||
|
MustIPv4Addr("192.0.0.0/24"),
|
||||||
|
MustIPv4Addr("192.0.2.0/24"),
|
||||||
|
MustIPv4Addr("198.51.100.0/24"),
|
||||||
|
MustIPv4Addr("203.0.113.0/24"),
|
||||||
|
MustIPv4Addr("240.0.0.0/4"),
|
||||||
|
MustIPv4Addr("255.255.255.255/32"),
|
||||||
|
MustIPv6Addr("::1/128"),
|
||||||
|
MustIPv6Addr("::/128"),
|
||||||
|
MustIPv6Addr("::ffff:0:0/96"),
|
||||||
|
|
||||||
|
// There is no way of expressing a whitelist per RFC2928
|
||||||
|
// atm without creating a negative mask, which I don't
|
||||||
|
// want to do atm.
|
||||||
|
//MustIPv6Addr("2001::/23"),
|
||||||
|
|
||||||
|
MustIPv6Addr("2001:db8::/32"),
|
||||||
|
MustIPv6Addr("2001:10::/28"),
|
||||||
|
MustIPv6Addr("fe80::/10"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// VisitAllRFCs iterates over all known RFCs and calls the visitor
|
||||||
|
func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) {
|
||||||
|
rfcNetMap := KnownRFCs()
|
||||||
|
|
||||||
|
// Blacklist of faux-RFCs. Don't show the world that we're abusing the
|
||||||
|
// RFC system in this library.
|
||||||
|
rfcBlacklist := map[uint]struct{}{
|
||||||
|
ForwardingBlacklist: {},
|
||||||
|
}
|
||||||
|
|
||||||
|
for rfcNum, sas := range rfcNetMap {
|
||||||
|
if _, found := rfcBlacklist[rfcNum]; !found {
|
||||||
|
fn(rfcNum, sas)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
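For illustration, a minimal sketch of how a consumer of the vendored package might drive the visitor above; the import alias and the printed summary are assumptions, everything else uses only the API shown in this file.

package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// Walk every public RFC entry and print how many networks it registers.
	sockaddr.VisitAllRFCs(func(rfcNum uint, sas sockaddr.SockAddrs) {
		fmt.Printf("RFC%d: %d networks\n", rfcNum, len(sas))
	})
}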
19 vendor/github.com/hashicorp/go-sockaddr/route_info.go generated vendored Normal file
@@ -0,0 +1,19 @@
package sockaddr

// RouteInterface specifies an interface for obtaining memoized route table and
// network information from a given OS.
type RouteInterface interface {
	// GetDefaultInterfaceName returns the name of the interface that has a
	// default route or an error and an empty string if a problem was
	// encountered.
	GetDefaultInterfaceName() (string, error)
}

// VisitCommands visits each command used by the platform-specific RouteInfo
// implementation.
func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) {
	for k, v := range ri.cmds {
		cmds := append([]string(nil), v...)
		fn(k, cmds)
	}
}
|
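A minimal sketch of how the RouteInterface above could be used on a supported platform; NewRouteInfo and GetDefaultInterfaceName come from the platform-specific files added below, and the logging is illustrative.

package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// NewRouteInfo picks the platform-specific implementation at build time.
	ri, err := sockaddr.NewRouteInfo()
	if err != nil {
		log.Fatal(err)
	}

	ifName, err := ri.GetDefaultInterfaceName()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default interface:", ifName)
}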
36 vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go generated vendored Normal file
@@ -0,0 +1,36 @@
|
||||||
|
// +build darwin dragonfly freebsd netbsd openbsd
|
||||||
|
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import "os/exec"
|
||||||
|
|
||||||
|
var cmds map[string][]string = map[string][]string{
|
||||||
|
"route": {"/sbin/route", "-n", "get", "default"},
|
||||||
|
}
|
||||||
|
|
||||||
|
type routeInfo struct {
|
||||||
|
cmds map[string][]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRouteInfo returns a BSD-specific implementation of the RouteInfo
|
||||||
|
// interface.
|
||||||
|
func NewRouteInfo() (routeInfo, error) {
|
||||||
|
return routeInfo{
|
||||||
|
cmds: cmds,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDefaultInterfaceName returns the interface name attached to the default
|
||||||
|
// route on the default interface.
|
||||||
|
func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
|
||||||
|
out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ifName string
|
||||||
|
if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return ifName, nil
|
||||||
|
}
|
10 vendor/github.com/hashicorp/go-sockaddr/route_info_default.go generated vendored Normal file
@@ -0,0 +1,10 @@
// +build android nacl plan9

package sockaddr

import "errors"

// getDefaultIfName is the default interface function for unsupported platforms.
func getDefaultIfName() (string, error) {
	return "", errors.New("No default interface found (unsupported platform)")
}
|
40 vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go generated vendored Normal file
@@ -0,0 +1,40 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
type routeInfo struct {
|
||||||
|
cmds map[string][]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRouteInfo returns a Linux-specific implementation of the RouteInfo
|
||||||
|
// interface.
|
||||||
|
func NewRouteInfo() (routeInfo, error) {
|
||||||
|
// CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on
|
||||||
|
// $PATH and fallback to /sbin/ip on error.
|
||||||
|
path, _ := exec.LookPath("ip")
|
||||||
|
if path == "" {
|
||||||
|
path = "/sbin/ip"
|
||||||
|
}
|
||||||
|
|
||||||
|
return routeInfo{
|
||||||
|
cmds: map[string][]string{"ip": {path, "route"}},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDefaultInterfaceName returns the interface name attached to the default
|
||||||
|
// route on the default interface.
|
||||||
|
func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
|
||||||
|
out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ifName string
|
||||||
|
if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil {
|
||||||
|
return "", errors.New("No default interface found")
|
||||||
|
}
|
||||||
|
return ifName, nil
|
||||||
|
}
|
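For context, a hedged sketch of what pulling the interface name out of typical `ip route` output could look like; the package's real parseDefaultIfNameFromIPCmd helper lives elsewhere and may differ, so the function below is an illustration only.

package main

import (
	"fmt"
	"strings"
)

// defaultIfNameFromIPRoute is an illustrative stand-in, not the package's
// parser: it scans `ip route` output for the "default" line and returns the
// token that follows "dev".
func defaultIfNameFromIPRoute(out string) (string, error) {
	for _, line := range strings.Split(out, "\n") {
		fields := strings.Fields(line)
		if len(fields) == 0 || fields[0] != "default" {
			continue
		}
		for i := 0; i < len(fields)-1; i++ {
			if fields[i] == "dev" {
				return fields[i+1], nil
			}
		}
	}
	return "", fmt.Errorf("no default route found")
}

func main() {
	out := "default via 192.168.1.1 dev eth0 proto dhcp metric 100\n"
	name, err := defaultIfNameFromIPRoute(out)
	fmt.Println(name, err) // eth0 <nil>
}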
37 vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go generated vendored Normal file
@@ -0,0 +1,37 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
var cmds map[string][]string = map[string][]string{
|
||||||
|
"route": {"/usr/sbin/route", "-n", "get", "default"},
|
||||||
|
}
|
||||||
|
|
||||||
|
type routeInfo struct {
|
||||||
|
cmds map[string][]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRouteInfo returns a BSD-specific implementation of the RouteInfo
|
||||||
|
// interface.
|
||||||
|
func NewRouteInfo() (routeInfo, error) {
|
||||||
|
return routeInfo{
|
||||||
|
cmds: cmds,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDefaultInterfaceName returns the interface name attached to the default
|
||||||
|
// route on the default interface.
|
||||||
|
func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
|
||||||
|
out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ifName string
|
||||||
|
if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
|
||||||
|
return "", errors.New("No default interface found")
|
||||||
|
}
|
||||||
|
return ifName, nil
|
||||||
|
}
|
41 vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go generated vendored Normal file
@@ -0,0 +1,41 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import "os/exec"
|
||||||
|
|
||||||
|
var cmds map[string][]string = map[string][]string{
|
||||||
|
"netstat": {"netstat", "-rn"},
|
||||||
|
"ipconfig": {"ipconfig"},
|
||||||
|
}
|
||||||
|
|
||||||
|
type routeInfo struct {
|
||||||
|
cmds map[string][]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRouteInfo returns a BSD-specific implementation of the RouteInfo
|
||||||
|
// interface.
|
||||||
|
func NewRouteInfo() (routeInfo, error) {
|
||||||
|
return routeInfo{
|
||||||
|
cmds: cmds,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDefaultInterfaceName returns the interface name attached to the default
|
||||||
|
// route on the default interface.
|
||||||
|
func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
|
||||||
|
ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ifName, nil
|
||||||
|
}
|
206 vendor/github.com/hashicorp/go-sockaddr/sockaddr.go generated vendored Normal file
@@ -0,0 +1,206 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SockAddrType int
|
||||||
|
type AttrName string
|
||||||
|
|
||||||
|
const (
|
||||||
|
TypeUnknown SockAddrType = 0x0
|
||||||
|
TypeUnix = 0x1
|
||||||
|
TypeIPv4 = 0x2
|
||||||
|
TypeIPv6 = 0x4
|
||||||
|
|
||||||
|
// TypeIP is the union of TypeIPv4 and TypeIPv6
|
||||||
|
TypeIP = 0x6
|
||||||
|
)
|
||||||
|
|
||||||
|
type SockAddr interface {
|
||||||
|
// CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC
|
||||||
|
// networks, -1 if the receiver is contained within the RFC network, or
|
||||||
|
// 1 if the address is not contained within the RFC.
|
||||||
|
CmpRFC(rfcNum uint, sa SockAddr) int
|
||||||
|
|
||||||
|
// Contains returns true if the SockAddr arg is contained within the
|
||||||
|
// receiver
|
||||||
|
Contains(SockAddr) bool
|
||||||
|
|
||||||
|
// Equal allows for the comparison of two SockAddrs
|
||||||
|
Equal(SockAddr) bool
|
||||||
|
|
||||||
|
DialPacketArgs() (string, string)
|
||||||
|
DialStreamArgs() (string, string)
|
||||||
|
ListenPacketArgs() (string, string)
|
||||||
|
ListenStreamArgs() (string, string)
|
||||||
|
|
||||||
|
// String returns the string representation of SockAddr
|
||||||
|
String() string
|
||||||
|
|
||||||
|
// Type returns the SockAddrType
|
||||||
|
Type() SockAddrType
|
||||||
|
}
|
||||||
|
|
||||||
|
// sockAddrAttrMap is a map of the SockAddr type-specific attributes.
|
||||||
|
var sockAddrAttrMap map[AttrName]func(SockAddr) string
|
||||||
|
var sockAddrAttrs []AttrName
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
sockAddrInit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new SockAddr from the string. The order in which New()
|
||||||
|
// attempts to construct a SockAddr is: IPv4Addr, IPv6Addr, SockAddrUnix.
|
||||||
|
//
|
||||||
|
// NOTE: New() relies on the heuristic that the path must begin with either a
// '.' or '/' character before a new UnixSock is created. For UNIX sockets that
|
||||||
|
// are absolute paths or are nested within a sub-directory, this works as
|
||||||
|
// expected, however if the UNIX socket is contained in the current working
|
||||||
|
// directory, this will fail unless the path begins with "./"
|
||||||
|
// (e.g. "./my-local-socket"). Calls directly to NewUnixSock() do not suffer
|
||||||
|
// this limitation. Invalid IP addresses such as "256.0.0.0/-1" will run afoul
|
||||||
|
// of this heuristic and be assumed to be a valid UNIX socket path (which they
|
||||||
|
// are, but it is probably not what you want and you won't realize it until you
|
||||||
|
// stat(2) the file system to discover it doesn't exist).
|
||||||
|
func NewSockAddr(s string) (SockAddr, error) {
|
||||||
|
ipv4Addr, err := NewIPv4Addr(s)
|
||||||
|
if err == nil {
|
||||||
|
return ipv4Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv6Addr, err := NewIPv6Addr(s)
|
||||||
|
if err == nil {
|
||||||
|
return ipv6Addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check to make sure the string begins with either a '.' or '/', or
|
||||||
|
// contains a '/'.
|
||||||
|
if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) {
|
||||||
|
unixSock, err := NewUnixSock(s)
|
||||||
|
if err == nil {
|
||||||
|
return unixSock, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s)
|
||||||
|
}
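A short usage sketch of NewSockAddr above; the three inputs are illustrative and exercise the IPv4, IPv6, and UNIX-socket fallthrough order described in the comment.

package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	for _, s := range []string{"10.0.0.1/8", "::1", "./my-local-socket"} {
		sa, err := sockaddr.NewSockAddr(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s -> %s (%s)\n", s, sa.String(), sa.Type())
	}
}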
|
||||||
|
|
||||||
|
// ToIPAddr returns an IPAddr type or nil if the type conversion fails.
|
||||||
|
func ToIPAddr(sa SockAddr) *IPAddr {
|
||||||
|
ipa, ok := sa.(IPAddr)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &ipa
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails.
|
||||||
|
func ToIPv4Addr(sa SockAddr) *IPv4Addr {
|
||||||
|
switch v := sa.(type) {
|
||||||
|
case IPv4Addr:
|
||||||
|
return &v
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails.
|
||||||
|
func ToIPv6Addr(sa SockAddr) *IPv6Addr {
|
||||||
|
switch v := sa.(type) {
|
||||||
|
case IPv6Addr:
|
||||||
|
return &v
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToUnixSock returns a UnixSock type or nil if the type conversion fails.
|
||||||
|
func ToUnixSock(sa SockAddr) *UnixSock {
|
||||||
|
switch v := sa.(type) {
|
||||||
|
case UnixSock:
|
||||||
|
return &v
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SockAddrAttr returns a string representation of an attribute for the given
|
||||||
|
// SockAddr.
|
||||||
|
func SockAddrAttr(sa SockAddr, selector AttrName) string {
|
||||||
|
fn, found := sockAddrAttrMap[selector]
|
||||||
|
if !found {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(sa)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String() for SockAddrType returns a string representation of the
|
||||||
|
// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown").
|
||||||
|
func (sat SockAddrType) String() string {
|
||||||
|
switch sat {
|
||||||
|
case TypeIPv4:
|
||||||
|
return "IPv4"
|
||||||
|
case TypeIPv6:
|
||||||
|
return "IPv6"
|
||||||
|
// There is no concrete "IP" type. Leaving here as a reminder.
|
||||||
|
// case TypeIP:
|
||||||
|
// return "IP"
|
||||||
|
case TypeUnix:
|
||||||
|
return "UNIX"
|
||||||
|
default:
|
||||||
|
panic("unsupported type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sockAddrInit is called once at init()
|
||||||
|
func sockAddrInit() {
|
||||||
|
sockAddrAttrs = []AttrName{
|
||||||
|
"type", // type should be first
|
||||||
|
"string",
|
||||||
|
}
|
||||||
|
|
||||||
|
sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{
|
||||||
|
"string": func(sa SockAddr) string {
|
||||||
|
return sa.String()
|
||||||
|
},
|
||||||
|
"type": func(sa SockAddr) string {
|
||||||
|
return sa.Type().String()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SockAddrAttrs returns a list of attributes supported by the SockAddr type
|
||||||
|
func SockAddrAttrs() []AttrName {
|
||||||
|
return sockAddrAttrs
|
||||||
|
}
|
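A brief sketch of the attribute lookup above; "type" and "string" are the only attribute names registered in sockAddrInit, so any other selector yields an empty string. The example address is illustrative.

package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ip, _ := sockaddr.NewIPv4Addr("192.0.2.10/24")
	fmt.Println(sockaddr.SockAddrAttr(ip, "type"))   // IPv4
	fmt.Println(sockaddr.SockAddrAttr(ip, "string")) // the address's string form
	fmt.Println(sockaddr.SockAddrAttr(ip, "bogus"))  // "" for unknown selectors
}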
||||||
|
|
||||||
|
// Although this is pretty trivial to do in a program, having the logic here is
|
||||||
|
// useful all around. Note that this marshals into a *string* -- the underlying
|
||||||
|
// string representation of the sockaddr. If you then unmarshal into this type
|
||||||
|
// in Go, all will work as expected, but externally you can take what comes out
|
||||||
|
// and use the string value directly.
|
||||||
|
type SockAddrMarshaler struct {
|
||||||
|
SockAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(s.SockAddr.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error {
|
||||||
|
var str string
|
||||||
|
err := json.Unmarshal(in, &str)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sa, err := NewSockAddr(str)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.SockAddr = sa
|
||||||
|
return nil
|
||||||
|
}
|
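A minimal sketch of round-tripping a SockAddr through JSON with the marshaler above; the listenerConfig struct and its field name are assumptions made for the example.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

// listenerConfig is a hypothetical config struct; the address is stored as a
// plain string in JSON and parsed back via NewSockAddr on unmarshal.
type listenerConfig struct {
	Bind *sockaddr.SockAddrMarshaler `json:"bind"`
}

func main() {
	var cfg listenerConfig
	if err := json.Unmarshal([]byte(`{"bind":"127.0.0.1"}`), &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Bind.Type()) // IPv4

	out, _ := json.Marshal(&cfg)
	fmt.Println(string(out))
}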
193 vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go generated vendored Normal file
@@ -0,0 +1,193 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SockAddrs is a slice of SockAddrs
|
||||||
|
type SockAddrs []SockAddr
|
||||||
|
|
||||||
|
func (s SockAddrs) Len() int { return len(s) }
|
||||||
|
func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
// CmpAddrFunc is the function signature that must be met to be used in the
|
||||||
|
// OrderedAddrBy multiAddrSorter
|
||||||
|
type CmpAddrFunc func(p1, p2 *SockAddr) int
|
||||||
|
|
||||||
|
// multiAddrSorter implements the Sort interface, sorting the SockAddrs within.
|
||||||
|
type multiAddrSorter struct {
|
||||||
|
addrs SockAddrs
|
||||||
|
cmp []CmpAddrFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort sorts the argument slice according to the Cmp functions passed to
|
||||||
|
// OrderedAddrBy.
|
||||||
|
func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) {
|
||||||
|
ms.addrs = sockAddrs
|
||||||
|
sort.Sort(ms)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrderedAddrBy sorts SockAddr by the list of sort function pointers.
|
||||||
|
func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter {
|
||||||
|
return &multiAddrSorter{
|
||||||
|
cmp: cmpFuncs,
|
||||||
|
}
|
||||||
|
}
|
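A short sketch of composing the sorter above with the Cmp functions defined further down in this file; the addresses are illustrative.

package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	addrs := sockaddr.SockAddrs{
		sockaddr.MustIPv4Addr("8.8.8.8"),
		sockaddr.MustIPv4Addr("10.1.2.3"),
		sockaddr.MustIPv6Addr("::1"),
	}

	// Order by type first, then by address within each type.
	sockaddr.OrderedAddrBy(sockaddr.AscType, sockaddr.AscAddress).Sort(addrs)

	for _, a := range addrs {
		fmt.Println(a)
	}
}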
||||||
|
|
||||||
|
// Len is part of sort.Interface.
|
||||||
|
func (ms *multiAddrSorter) Len() int {
|
||||||
|
return len(ms.addrs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Less is part of sort.Interface. It is implemented by looping along the
|
||||||
|
// Cmp() functions until it finds a comparison that is either less than,
|
||||||
|
// equal to, or greater than.
|
||||||
|
func (ms *multiAddrSorter) Less(i, j int) bool {
|
||||||
|
p, q := &ms.addrs[i], &ms.addrs[j]
|
||||||
|
// Try all but the last comparison.
|
||||||
|
var k int
|
||||||
|
for k = 0; k < len(ms.cmp)-1; k++ {
|
||||||
|
cmp := ms.cmp[k]
|
||||||
|
x := cmp(p, q)
|
||||||
|
switch x {
|
||||||
|
case -1:
|
||||||
|
// p < q, so we have a decision.
|
||||||
|
return true
|
||||||
|
case 1:
|
||||||
|
// p > q, so we have a decision.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// p == q; try the next comparison.
|
||||||
|
}
|
||||||
|
// All comparisons to here said "equal", so just return whatever the
|
||||||
|
// final comparison reports.
|
||||||
|
switch ms.cmp[k](p, q) {
|
||||||
|
case -1:
|
||||||
|
return true
|
||||||
|
case 1:
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
// Still a tie! Now what?
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap is part of sort.Interface.
|
||||||
|
func (ms *multiAddrSorter) Swap(i, j int) {
|
||||||
|
ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// NOTE (sean@): These constants are here for code readability only and
|
||||||
|
// are sprucing up the code for readability purposes. Some of the
|
||||||
|
// Cmp*() variants have confusing logic (especially when dealing with
|
||||||
|
// mixed-type comparisons) and this, I think, has made it easier to grok
|
||||||
|
// the code faster.
|
||||||
|
sortReceiverBeforeArg = -1
|
||||||
|
sortDeferDecision = 0
|
||||||
|
sortArgBeforeReceiver = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// AscAddress is a sorting function to sort SockAddrs by their respective
|
||||||
|
// address type. Non-equal types are deferred in the sort.
|
||||||
|
func AscAddress(p1Ptr, p2Ptr *SockAddr) int {
|
||||||
|
p1 := *p1Ptr
|
||||||
|
p2 := *p2Ptr
|
||||||
|
|
||||||
|
switch v := p1.(type) {
|
||||||
|
case IPv4Addr:
|
||||||
|
return v.CmpAddress(p2)
|
||||||
|
case IPv6Addr:
|
||||||
|
return v.CmpAddress(p2)
|
||||||
|
case UnixSock:
|
||||||
|
return v.CmpAddress(p2)
|
||||||
|
default:
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AscPort is a sorting function to sort SockAddrs by their respective address
|
||||||
|
// type. Non-equal types are deferred in the sort.
|
||||||
|
func AscPort(p1Ptr, p2Ptr *SockAddr) int {
|
||||||
|
p1 := *p1Ptr
|
||||||
|
p2 := *p2Ptr
|
||||||
|
|
||||||
|
switch v := p1.(type) {
|
||||||
|
case IPv4Addr:
|
||||||
|
return v.CmpPort(p2)
|
||||||
|
case IPv6Addr:
|
||||||
|
return v.CmpPort(p2)
|
||||||
|
default:
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AscPrivate is a sorting function to sort "more secure" private values before
|
||||||
|
// "more public" values. Both IPv4 and IPv6 are compared against RFC6890
|
||||||
|
// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and
|
||||||
|
// IPv6 includes RFC4193).
|
||||||
|
func AscPrivate(p1Ptr, p2Ptr *SockAddr) int {
|
||||||
|
p1 := *p1Ptr
|
||||||
|
p2 := *p2Ptr
|
||||||
|
|
||||||
|
switch v := p1.(type) {
|
||||||
|
case IPv4Addr, IPv6Addr:
|
||||||
|
return v.CmpRFC(6890, p2)
|
||||||
|
default:
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AscNetworkSize is a sorting function to sort SockAddrs based on their network
|
||||||
|
// size. Non-equal types are deferred in the sort.
|
||||||
|
func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int {
|
||||||
|
p1 := *p1Ptr
|
||||||
|
p2 := *p2Ptr
|
||||||
|
p1Type := p1.Type()
|
||||||
|
p2Type := p2.Type()
|
||||||
|
|
||||||
|
// Network size operations on non-IP types make no sense
|
||||||
|
if p1Type != p2Type && p1Type != TypeIP {
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
|
||||||
|
ipA := p1.(IPAddr)
|
||||||
|
ipB := p2.(IPAddr)
|
||||||
|
|
||||||
|
return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AscType is a sorting function to sort "more secure" types before
|
||||||
|
// "less-secure" types.
|
||||||
|
func AscType(p1Ptr, p2Ptr *SockAddr) int {
|
||||||
|
p1 := *p1Ptr
|
||||||
|
p2 := *p2Ptr
|
||||||
|
p1Type := p1.Type()
|
||||||
|
p2Type := p2.Type()
|
||||||
|
switch {
|
||||||
|
case p1Type < p2Type:
|
||||||
|
return sortReceiverBeforeArg
|
||||||
|
case p1Type == p2Type:
|
||||||
|
return sortDeferDecision
|
||||||
|
case p1Type > p2Type:
|
||||||
|
return sortArgBeforeReceiver
|
||||||
|
default:
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterByType returns two lists: a list of matched and unmatched SockAddrs
|
||||||
|
func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) {
|
||||||
|
matched = make(SockAddrs, 0, len(sas))
|
||||||
|
excluded = make(SockAddrs, 0, len(sas))
|
||||||
|
|
||||||
|
for _, sa := range sas {
|
||||||
|
if sa.Type()&type_ != 0 {
|
||||||
|
matched = append(matched, sa)
|
||||||
|
} else {
|
||||||
|
excluded = append(excluded, sa)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matched, excluded
|
||||||
|
}
|
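A quick sketch of the filter above using the TypeIP bitmask from sockaddr.go; the inputs are illustrative.

package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	addrs := sockaddr.SockAddrs{
		sockaddr.MustIPv4Addr("192.0.2.1"),
		sockaddr.MustIPv6Addr("2001:db8::1"),
		sockaddr.MustUnixSock("/tmp/app.sock"),
	}

	// TypeIP matches both IPv4 and IPv6, so only the UNIX socket is excluded.
	matched, excluded := addrs.FilterByType(sockaddr.TypeIP)
	fmt.Println(len(matched), len(excluded)) // 2 1
}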
135 vendor/github.com/hashicorp/go-sockaddr/unixsock.go generated vendored Normal file
@@ -0,0 +1,135 @@
|
||||||
|
package sockaddr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type UnixSock struct {
|
||||||
|
SockAddr
|
||||||
|
path string
|
||||||
|
}
|
||||||
|
type UnixSocks []*UnixSock
|
||||||
|
|
||||||
|
// unixAttrMap is a map of the UnixSockAddr type-specific attributes.
|
||||||
|
var unixAttrMap map[AttrName]func(UnixSock) string
|
||||||
|
var unixAttrs []AttrName
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
unixAttrInit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUnixSock creates an UnixSock from a string path. String can be in the
|
||||||
|
// form of either URI-based string (e.g. `file:///etc/passwd`), an absolute
|
||||||
|
// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`).
|
||||||
|
func NewUnixSock(s string) (ret UnixSock, err error) {
|
||||||
|
ret.path = s
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CmpAddress follows the Cmp() standard protocol and returns:
|
||||||
|
//
|
||||||
|
// - -1 If the receiver should sort first because its name lexically sorts before arg
|
||||||
|
// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path.
|
||||||
|
// - 1 If the argument should sort first.
|
||||||
|
func (us UnixSock) CmpAddress(sa SockAddr) int {
|
||||||
|
usb, ok := sa.(UnixSock)
|
||||||
|
if !ok {
|
||||||
|
return sortDeferDecision
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Compare(us.Path(), usb.Path())
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialPacketArgs returns the arguments required to be passed to net.DialUnix()
|
||||||
|
// with the `unixgram` network type.
|
||||||
|
func (us UnixSock) DialPacketArgs() (network, dialArgs string) {
|
||||||
|
return "unixgram", us.path
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialStreamArgs returns the arguments required to be passed to net.DialUnix()
|
||||||
|
// with the `unix` network type.
|
||||||
|
func (us UnixSock) DialStreamArgs() (network, dialArgs string) {
|
||||||
|
return "unix", us.path
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal returns true if a SockAddr is equal to the receiving UnixSock.
|
||||||
|
func (us UnixSock) Equal(sa SockAddr) bool {
|
||||||
|
usb, ok := sa.(UnixSock)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if us.Path() != usb.Path() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenPacketArgs returns the arguments required to be passed to
|
||||||
|
// net.ListenUnixgram() with the `unixgram` network type.
|
||||||
|
func (us UnixSock) ListenPacketArgs() (network, dialArgs string) {
|
||||||
|
return "unixgram", us.path
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenStreamArgs returns the arguments required to be passed to
|
||||||
|
// net.ListenUnix() with the `unix` network type.
|
||||||
|
func (us UnixSock) ListenStreamArgs() (network, dialArgs string) {
|
||||||
|
return "unix", us.path
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustUnixSock is a helper method that must return an UnixSock or panic on
|
||||||
|
// invalid input.
|
||||||
|
func MustUnixSock(addr string) UnixSock {
|
||||||
|
us, err := NewUnixSock(addr)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err))
|
||||||
|
}
|
||||||
|
return us
|
||||||
|
}
|
||||||
|
|
||||||
|
// Path returns the given path of the UnixSock
|
||||||
|
func (us UnixSock) Path() string {
|
||||||
|
return us.path
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the path of the UnixSock
|
||||||
|
func (us UnixSock) String() string {
|
||||||
|
return fmt.Sprintf("%+q", us.path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type is used as a type switch and returns TypeUnix
|
||||||
|
func (UnixSock) Type() SockAddrType {
|
||||||
|
return TypeUnix
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnixSockAttrs returns a list of attributes supported by the UnixSock type
|
||||||
|
func UnixSockAttrs() []AttrName {
|
||||||
|
return unixAttrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnixSockAttr returns a string representation of an attribute for the given
|
||||||
|
// UnixSock.
|
||||||
|
func UnixSockAttr(us UnixSock, attrName AttrName) string {
|
||||||
|
fn, found := unixAttrMap[attrName]
|
||||||
|
if !found {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(us)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unixAttrInit is called once at init()
|
||||||
|
func unixAttrInit() {
|
||||||
|
// Sorted for human readability
|
||||||
|
unixAttrs = []AttrName{
|
||||||
|
"path",
|
||||||
|
}
|
||||||
|
|
||||||
|
unixAttrMap = map[AttrName]func(us UnixSock) string{
|
||||||
|
"path": func(us UnixSock) string {
|
||||||
|
return us.Path()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
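A minimal sketch of the UnixSock helpers above; the returned network/address pairs are what net.Dial and net.Listen expect, and the socket path is illustrative.

package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	us := sockaddr.MustUnixSock("/tmp/app.sock")

	network, addr := us.ListenStreamArgs()
	fmt.Println(network, addr) // unix /tmp/app.sock

	network, addr = us.DialPacketArgs()
	fmt.Println(network, addr) // unixgram /tmp/app.sock
}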
9 vendor/github.com/hashicorp/hcl/.gitignore generated vendored
@@ -1,9 +0,0 @@
|
||||||
y.output
|
|
||||||
|
|
||||||
# ignore intellij files
|
|
||||||
.idea
|
|
||||||
*.iml
|
|
||||||
*.ipr
|
|
||||||
*.iws
|
|
||||||
|
|
||||||
*.test
|
|
3 vendor/github.com/hashicorp/hcl/.travis.yml generated vendored
@@ -1,3 +0,0 @@
|
||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go: 1.5
|
|
17 vendor/github.com/hashicorp/hcl/Makefile generated vendored
@@ -1,17 +0,0 @@
|
||||||
TEST?=./...
|
|
||||||
|
|
||||||
default: test
|
|
||||||
|
|
||||||
fmt: generate
|
|
||||||
go fmt ./...
|
|
||||||
|
|
||||||
test: generate
|
|
||||||
go test $(TEST) $(TESTARGS)
|
|
||||||
|
|
||||||
generate:
|
|
||||||
go generate ./...
|
|
||||||
|
|
||||||
updatedeps:
|
|
||||||
go get -u golang.org/x/tools/cmd/stringer
|
|
||||||
|
|
||||||
.PHONY: default generate test updatedeps
|
|
104 vendor/github.com/hashicorp/hcl/README.md generated vendored
@@ -1,104 +0,0 @@
|
||||||
# HCL
|
|
||||||
|
|
||||||
[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
|
|
||||||
|
|
||||||
HCL (HashiCorp Configuration Language) is a configuration language built
|
|
||||||
by HashiCorp. The goal of HCL is to build a structured configuration language
|
|
||||||
that is both human and machine friendly for use with command-line tools, but
|
|
||||||
specifically targeted towards DevOps tools, servers, etc.
|
|
||||||
|
|
||||||
HCL is also fully JSON compatible. That is, JSON can be used as completely
|
|
||||||
valid input to a system expecting HCL. This helps makes systems
|
|
||||||
interoperable with other systems.
|
|
||||||
|
|
||||||
HCL is heavily inspired by
|
|
||||||
[libucl](https://github.com/vstakhov/libucl),
|
|
||||||
nginx configuration, and others similar.
|
|
||||||
|
|
||||||
## Why?
|
|
||||||
|
|
||||||
A common question when viewing HCL is to ask the question: why not
|
|
||||||
JSON, YAML, etc.?
|
|
||||||
|
|
||||||
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
|
|
||||||
used a variety of configuration languages from full programming languages
|
|
||||||
such as Ruby to complete data structure languages such as JSON. What we
|
|
||||||
learned is that some people wanted human-friendly configuration languages
|
|
||||||
and some people wanted machine-friendly languages.
|
|
||||||
|
|
||||||
JSON fits a nice balance in this, but is fairly verbose and most
|
|
||||||
importantly doesn't support comments. With YAML, we found that beginners
|
|
||||||
had a really hard time determining what the actual structure was, and
|
|
||||||
ended up guessing more often than not whether to use a hyphen, colon, etc.
|
|
||||||
in order to represent some configuration key.
|
|
||||||
|
|
||||||
Full programming languages such as Ruby enable complex behavior
|
|
||||||
a configuration language shouldn't usually allow, and also forces
|
|
||||||
people to learn some set of Ruby.
|
|
||||||
|
|
||||||
Because of this, we decided to create our own configuration language
|
|
||||||
that is JSON-compatible. Our configuration language (HCL) is designed
|
|
||||||
to be written and modified by humans. The API for HCL allows JSON
|
|
||||||
as an input so that it is also machine-friendly (machines can generate
|
|
||||||
JSON instead of trying to generate HCL).
|
|
||||||
|
|
||||||
Our goal with HCL is not to alienate other configuration languages.
|
|
||||||
It is instead to provide HCL as a specialized language for our tools,
|
|
||||||
and JSON as the interoperability layer.
|
|
||||||
|
|
||||||
## Syntax
|
|
||||||
|
|
||||||
For a complete grammar, please see the parser itself. A high-level overview
|
|
||||||
of the syntax and grammar is listed here.
|
|
||||||
|
|
||||||
* Single line comments start with `#` or `//`
|
|
||||||
|
|
||||||
* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
|
|
||||||
are not allowed. A multi-line comment (also known as a block comment)
|
|
||||||
terminates at the first `*/` found.
|
|
||||||
|
|
||||||
* Values are assigned with the syntax `key = value` (whitespace doesn't
|
|
||||||
matter). The value can be any primitive: a string, number, boolean,
|
|
||||||
object, or list.
|
|
||||||
|
|
||||||
* Strings are double-quoted and can contain any UTF-8 characters.
|
|
||||||
Example: `"Hello, World"`
|
|
||||||
|
|
||||||
* Multi-line strings start with `<<EOF` at the end of a line, and end
|
|
||||||
with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
|
|
||||||
Any text may be used in place of `EOF`. Example:
|
|
||||||
```
|
|
||||||
<<FOO
|
|
||||||
hello
|
|
||||||
world
|
|
||||||
FOO
|
|
||||||
```
|
|
||||||
|
|
||||||
* Numbers are assumed to be base 10. If you prefix a number with 0x,
|
|
||||||
it is treated as a hexadecimal. If it is prefixed with 0, it is
|
|
||||||
treated as an octal. Numbers can be in scientific notation: "1e10".
|
|
||||||
|
|
||||||
* Boolean values: `true`, `false`
|
|
||||||
|
|
||||||
* Arrays can be made by wrapping it in `[]`. Example:
|
|
||||||
`["foo", "bar", 42]`. Arrays can contain primitives
|
|
||||||
and other arrays, but cannot contain objects. Objects must
|
|
||||||
use the block syntax shown below.
|
|
||||||
|
|
||||||
Objects and nested objects are created using the structure shown below:
|
|
||||||
|
|
||||||
```
|
|
||||||
variable "ami" {
|
|
||||||
description = "the AMI to use"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Thanks
|
|
||||||
|
|
||||||
Thanks to:
|
|
||||||
|
|
||||||
* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
|
|
||||||
and syntax that HCL was based off of.
|
|
||||||
|
|
||||||
* [@fatih](https://github.com/fatih) - The rewritten HCL parser
|
|
||||||
in pure Go (no goyacc) and support for a printer.
|
|
16 vendor/github.com/hashicorp/hcl/appveyor.yml generated vendored
@@ -1,16 +0,0 @@
|
||||||
version: "build-{branch}-{build}"
|
|
||||||
image: Visual Studio 2015
|
|
||||||
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
|
|
||||||
environment:
|
|
||||||
GOPATH: c:\gopath
|
|
||||||
init:
|
|
||||||
- git config --global core.autocrlf true
|
|
||||||
install:
|
|
||||||
- cmd: >-
|
|
||||||
echo %Path%
|
|
||||||
|
|
||||||
go version
|
|
||||||
|
|
||||||
go env
|
|
||||||
build_script:
|
|
||||||
- cmd: go test -v ./...
|
|
130 vendor/github.com/hashicorp/hcl/decoder.go generated vendored
@@ -89,9 +89,9 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
|
||||||
switch k.Kind() {
|
switch k.Kind() {
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
return d.decodeBool(name, node, result)
|
return d.decodeBool(name, node, result)
|
||||||
case reflect.Float64:
|
case reflect.Float32, reflect.Float64:
|
||||||
return d.decodeFloat(name, node, result)
|
return d.decodeFloat(name, node, result)
|
||||||
case reflect.Int:
|
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||||
return d.decodeInt(name, node, result)
|
return d.decodeInt(name, node, result)
|
||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
// When we see an interface, we make our own thing
|
// When we see an interface, we make our own thing
|
||||||
|
@ -117,10 +117,17 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
|
||||||
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
||||||
switch n := node.(type) {
|
switch n := node.(type) {
|
||||||
case *ast.LiteralType:
|
case *ast.LiteralType:
|
||||||
if n.Token.Type == token.BOOL {
|
switch n.Token.Type {
|
||||||
v, err := strconv.ParseBool(n.Token.Text)
|
case token.BOOL, token.STRING, token.NUMBER:
|
||||||
if err != nil {
|
var v bool
|
||||||
return err
|
s := strings.ToLower(strings.Replace(n.Token.Text, "\"", "", -1))
|
||||||
|
switch s {
|
||||||
|
case "1", "true":
|
||||||
|
v = true
|
||||||
|
case "0", "false":
|
||||||
|
v = false
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("decodeBool: Unknown value for boolean: %s", n.Token.Text)
|
||||||
}
|
}
|
||||||
|
|
||||||
result.Set(reflect.ValueOf(v))
|
result.Set(reflect.ValueOf(v))
|
||||||
|
@ -137,13 +144,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e
|
||||||
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
||||||
switch n := node.(type) {
|
switch n := node.(type) {
|
||||||
case *ast.LiteralType:
|
case *ast.LiteralType:
|
||||||
if n.Token.Type == token.FLOAT {
|
if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
|
||||||
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
result.Set(reflect.ValueOf(v))
|
result.Set(reflect.ValueOf(v).Convert(result.Type()))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -164,7 +171,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if result.Kind() == reflect.Interface {
|
||||||
result.Set(reflect.ValueOf(int(v)))
|
result.Set(reflect.ValueOf(int(v)))
|
||||||
|
} else {
|
||||||
|
result.SetInt(v)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
case token.STRING:
|
case token.STRING:
|
||||||
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
||||||
|
@ -172,7 +183,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if result.Kind() == reflect.Interface {
|
||||||
result.Set(reflect.ValueOf(int(v)))
|
result.Set(reflect.ValueOf(int(v)))
|
||||||
|
} else {
|
||||||
|
result.SetInt(v)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -409,7 +424,6 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
|
||||||
if result.Kind() == reflect.Interface {
|
if result.Kind() == reflect.Interface {
|
||||||
result = result.Elem()
|
result = result.Elem()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create the slice if it isn't nil
|
// Create the slice if it isn't nil
|
||||||
resultType := result.Type()
|
resultType := result.Type()
|
||||||
resultElemType := resultType.Elem()
|
resultElemType := resultType.Elem()
|
||||||
|
@ -443,6 +457,12 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
|
||||||
|
|
||||||
// Decode
|
// Decode
|
||||||
val := reflect.Indirect(reflect.New(resultElemType))
|
val := reflect.Indirect(reflect.New(resultElemType))
|
||||||
|
|
||||||
|
// if item is an object that was decoded from ambiguous JSON and
|
||||||
|
// flattened, make sure it's expanded if it needs to decode into a
|
||||||
|
// defined structure.
|
||||||
|
item := expandObject(item, val)
|
||||||
|
|
||||||
if err := d.decode(fieldName, item, val); err != nil {
|
if err := d.decode(fieldName, item, val); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -455,6 +475,57 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// expandObject detects if an ambiguous JSON object was flattened to a List which
|
||||||
|
// should be decoded into a struct, and expands the ast to properly decode.
|
||||||
|
func expandObject(node ast.Node, result reflect.Value) ast.Node {
|
||||||
|
item, ok := node.(*ast.ObjectItem)
|
||||||
|
if !ok {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
elemType := result.Type()
|
||||||
|
|
||||||
|
// our target type must be a struct
|
||||||
|
switch elemType.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
switch elemType.Elem().Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
//OK
|
||||||
|
default:
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
//OK
|
||||||
|
default:
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// A list value will have a key and field name. If it had more fields,
|
||||||
|
// it wouldn't have been flattened.
|
||||||
|
if len(item.Keys) != 2 {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
keyToken := item.Keys[0].Token
|
||||||
|
item.Keys = item.Keys[1:]
|
||||||
|
|
||||||
|
// we need to un-flatten the ast enough to decode
|
||||||
|
newNode := &ast.ObjectItem{
|
||||||
|
Keys: []*ast.ObjectKey{
|
||||||
|
&ast.ObjectKey{
|
||||||
|
Token: keyToken,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Val: &ast.ObjectType{
|
||||||
|
List: &ast.ObjectList{
|
||||||
|
Items: []*ast.ObjectItem{item},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return newNode
|
||||||
|
}
|
||||||
|
|
||||||
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
||||||
switch n := node.(type) {
|
switch n := node.(type) {
|
||||||
case *ast.LiteralType:
|
case *ast.LiteralType:
|
||||||
|
@ -489,7 +560,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
// the yacc parser would always ensure top-level elements were arrays. The new
|
// the yacc parser would always ensure top-level elements were arrays. The new
|
||||||
// parser does not make the same guarantees, thus we need to convert any
|
// parser does not make the same guarantees, thus we need to convert any
|
||||||
// top-level literal elements into a list.
|
// top-level literal elements into a list.
|
||||||
if _, ok := node.(*ast.LiteralType); ok {
|
if _, ok := node.(*ast.LiteralType); ok && item != nil {
|
||||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -509,7 +580,11 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
|
|
||||||
// Compile the list of all the fields that we're going to be decoding
|
// Compile the list of all the fields that we're going to be decoding
|
||||||
// from all the structs.
|
// from all the structs.
|
||||||
fields := make(map[*reflect.StructField]reflect.Value)
|
type field struct {
|
||||||
|
field reflect.StructField
|
||||||
|
val reflect.Value
|
||||||
|
}
|
||||||
|
fields := []field{}
|
||||||
for len(structs) > 0 {
|
for len(structs) > 0 {
|
||||||
structVal := structs[0]
|
structVal := structs[0]
|
||||||
structs = structs[1:]
|
structs = structs[1:]
|
||||||
|
@ -517,6 +592,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
structType := structVal.Type()
|
structType := structVal.Type()
|
||||||
for i := 0; i < structType.NumField(); i++ {
|
for i := 0; i < structType.NumField(); i++ {
|
||||||
fieldType := structType.Field(i)
|
fieldType := structType.Field(i)
|
||||||
|
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
|
||||||
|
|
||||||
|
// Ignore fields with tag name "-"
|
||||||
|
if tagParts[0] == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if fieldType.Anonymous {
|
if fieldType.Anonymous {
|
||||||
fieldKind := fieldType.Type.Kind()
|
fieldKind := fieldType.Type.Kind()
|
||||||
|
@ -531,7 +612,6 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
// We have an embedded field. We "squash" the fields down
|
// We have an embedded field. We "squash" the fields down
|
||||||
// if specified in the tag.
|
// if specified in the tag.
|
||||||
squash := false
|
squash := false
|
||||||
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
|
|
||||||
for _, tag := range tagParts[1:] {
|
for _, tag := range tagParts[1:] {
|
||||||
if tag == "squash" {
|
if tag == "squash" {
|
||||||
squash = true
|
squash = true
|
||||||
|
@ -547,7 +627,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normal struct field, store it away
|
// Normal struct field, store it away
|
||||||
fields[&fieldType] = structVal.Field(i)
|
fields = append(fields, field{fieldType, structVal.Field(i)})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -555,26 +635,27 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
decodedFields := make([]string, 0, len(fields))
|
decodedFields := make([]string, 0, len(fields))
|
||||||
decodedFieldsVal := make([]reflect.Value, 0)
|
decodedFieldsVal := make([]reflect.Value, 0)
|
||||||
unusedKeysVal := make([]reflect.Value, 0)
|
unusedKeysVal := make([]reflect.Value, 0)
|
||||||
for fieldType, field := range fields {
|
for _, f := range fields {
|
||||||
if !field.IsValid() {
|
field, fieldValue := f.field, f.val
|
||||||
|
if !fieldValue.IsValid() {
|
||||||
// This should never happen
|
// This should never happen
|
||||||
panic("field is not valid")
|
panic("field is not valid")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we can't set the field, then it is unexported or something,
|
// If we can't set the field, then it is unexported or something,
|
||||||
// and we just continue onwards.
|
// and we just continue onwards.
|
||||||
if !field.CanSet() {
|
if !fieldValue.CanSet() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
fieldName := fieldType.Name
|
fieldName := field.Name
|
||||||
|
|
||||||
tagValue := fieldType.Tag.Get(tagName)
|
tagValue := field.Tag.Get(tagName)
|
||||||
tagParts := strings.SplitN(tagValue, ",", 2)
|
tagParts := strings.SplitN(tagValue, ",", 2)
|
||||||
if len(tagParts) >= 2 {
|
if len(tagParts) >= 2 {
|
||||||
switch tagParts[1] {
|
switch tagParts[1] {
|
||||||
case "decodedFields":
|
case "decodedFields":
|
||||||
decodedFieldsVal = append(decodedFieldsVal, field)
|
decodedFieldsVal = append(decodedFieldsVal, fieldValue)
|
||||||
continue
|
continue
|
||||||
case "key":
|
case "key":
|
||||||
if item == nil {
|
if item == nil {
|
||||||
|
@ -585,10 +666,10 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
field.SetString(item.Keys[0].Token.Value().(string))
|
fieldValue.SetString(item.Keys[0].Token.Value().(string))
|
||||||
continue
|
continue
|
||||||
case "unusedKeys":
|
case "unusedKeys":
|
||||||
unusedKeysVal = append(unusedKeysVal, field)
|
unusedKeysVal = append(unusedKeysVal, fieldValue)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -601,6 +682,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
// match (only object with the field), then we decode it exactly.
|
// match (only object with the field), then we decode it exactly.
|
||||||
// If it is a prefix match, then we decode the matches.
|
// If it is a prefix match, then we decode the matches.
|
||||||
filter := list.Filter(fieldName)
|
filter := list.Filter(fieldName)
|
||||||
|
|
||||||
prefixMatches := filter.Children()
|
prefixMatches := filter.Children()
|
||||||
matches := filter.Elem()
|
matches := filter.Elem()
|
||||||
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
|
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
|
||||||
|
@ -614,7 +696,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
// because we actually want the value.
|
// because we actually want the value.
|
||||||
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||||
if len(prefixMatches.Items) > 0 {
|
if len(prefixMatches.Items) > 0 {
|
||||||
if err := d.decode(fieldName, prefixMatches, field); err != nil {
|
if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -624,12 +706,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
|
||||||
decodeNode = &ast.ObjectList{Items: ot.List.Items}
|
decodeNode = &ast.ObjectList{Items: ot.List.Items}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := d.decode(fieldName, decodeNode, field); err != nil {
|
if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
decodedFields = append(decodedFields, fieldType.Name)
|
decodedFields = append(decodedFields, field.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(decodedFieldsVal) > 0 {
|
if len(decodedFieldsVal) > 0 {
|
||||||
|
|
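As a rough illustration of the struct-decoding changes above (for example the newly added skip of fields tagged "-"), here is a minimal sketch assuming the vendored github.com/hashicorp/hcl package; the Config type and its fields are invented for the example:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type; Internal is tagged "-", which the
// updated decoder skips instead of looking it up as a key.
type Config struct {
	Name     string `hcl:"name"`
	Internal string `hcl:"-"`
}

func main() {
	input := `name = "demo"`

	var c Config
	if err := hcl.Decode(&c, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}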
10 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go generated vendored
@@ -133,6 +133,12 @@ type ObjectItem struct {
 }

 func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this causes
+	// a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
 	return o.Keys[0].Pos()
 }

@@ -150,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos {
 type LiteralType struct {
 	Token token.Token

-	// associated line comment, only when used in a list
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
 	LineComment *CommentGroup
 }

@@ -209,3 +216,4 @@ func (c *CommentGroup) Pos() token.Pos {
 //-------------------------------------------------------------------

 func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
118 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go generated vendored
@@ -3,6 +3,7 @@
 package parser

 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"strings"
@@ -36,6 +37,11 @@ func newParser(src []byte) *Parser {

 // Parse returns the fully parsed source and returns the abstract syntax tree.
 func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we may
+	// end up with dangling "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
 	p := newParser(src)
 	return p.Parse()
 }
@@ -50,7 +56,7 @@ func (p *Parser) Parse() (*ast.File, error) {
 		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
 	}

-	f.Node, err = p.objectList()
+	f.Node, err = p.objectList(false)
 	if scerr != nil {
 		return nil, scerr
 	}
@@ -62,11 +68,23 @@ func (p *Parser) Parse() (*ast.File, error) {
 	return f, nil
 }

-func (p *Parser) objectList() (*ast.ObjectList, error) {
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
 	defer un(trace(p, "ParseObjectList"))
 	node := &ast.ObjectList{}

 	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
 		n, err := p.objectItem()
 		if err == errEofToken {
 			break // we are finished
@@ -79,6 +97,13 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
 		}

 		node.Add(n)
+
+		// object lists can be optionally comma-delimited e.g. when a list of maps
+		// is being expressed, so a comma is allowed here - it's simply consumed
+		tok := p.scan()
+		if tok.Type != token.COMMA {
+			p.unscan()
+		}
 	}
 	return node, nil
 }
@@ -172,9 +197,18 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
 			keyStr = append(keyStr, k.Token.Text)
 		}

-		return nil, fmt.Errorf(
-			"key '%s' expected start of object ('{') or assignment ('=')",
-			strings.Join(keyStr, " "))
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
+	}
+
+	// key=#comment
+	// val
+	if p.lineComment != nil {
+		o.LineComment, p.lineComment = p.lineComment, nil
 	}

 	// do a look-ahead for line comment
@@ -220,13 +254,27 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {

 			return keys, nil
 		case token.LBRACE:
+			var err error
+
+			// If we have no keys, then it is a syntax error. i.e. {{}} is not
+			// allowed.
+			if len(keys) == 0 {
+				err = &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+				}
+			}
+
 			// object
-			return keys, nil
+			return keys, err
 		case token.IDENT, token.STRING:
 			keyCount++
 			keys = append(keys, &ast.ObjectKey{Token: p.tok})
 		case token.ILLEGAL:
-			fmt.Println("illegal")
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
 		default:
 			return keys, &PosError{
 				Pos: p.tok.Pos,
@@ -270,7 +318,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 		Lbrace: p.tok.Pos,
 	}

-	l, err := p.objectList()
+	l, err := p.objectList(true)

 	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
 	// not a RBRACE, it's an syntax error and we just return it.
@@ -278,9 +326,12 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 		return nil, err
 	}

-	// If there is no error, we should be at a RBRACE to end the object
-	if p.tok.Type != token.RBRACE {
-		return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
 	}

 	o.List = l
@@ -300,27 +351,38 @@ func (p *Parser) listType() (*ast.ListType, error) {
 	needComma := false
 	for {
 		tok := p.scan()
-		switch tok.Type {
-		case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
 		if needComma {
+			switch tok.Type {
+			case token.COMMA, token.RBRACK:
+			default:
 				return nil, &PosError{
 					Pos: tok.Pos,
-					Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA),
+					Err: fmt.Errorf(
+						"error parsing list, expected comma or list end, got: %s",
+						tok.Type),
 				}
 			}
+		}
+		switch tok.Type {
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
 			node, err := p.literalType()
 			if err != nil {
 				return nil, err
 			}
+
+			// If there is a lead comment, apply it
+			if p.leadComment != nil {
+				node.LeadComment = p.leadComment
+				p.leadComment = nil
+			}
+
 			l.Add(node)
 			needComma = true
 		case token.COMMA:
 			// get next list item or we are at the end
 			// do a look-ahead for line comment
 			p.scan()
-			if p.lineComment != nil {
+			if p.lineComment != nil && len(l.List) > 0 {
 				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
 				if ok {
 					lit.LineComment = p.lineComment
@@ -332,12 +394,28 @@ func (p *Parser) listType() (*ast.ListType, error) {

 			needComma = false
 			continue
-		case token.BOOL:
-			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACE:
+			// Looks like a nested object, so parse it out
+			node, err := p.objectType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse object within list: %s", err),
+				}
+			}
+			l.Add(node)
+			needComma = true
 		case token.LBRACK:
-			// TODO(arslan) should we support nested lists? Even though it's
-			// written in README of HCL, it's not a part of the grammar
-			// (not defined in parse.y)
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
 		case token.RBRACK:
 			// finished
 			l.Rbrack = p.tok.Pos
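A hedged sketch of what the parser changes above accept: objects nested inside list literals, with optional commas between and after the items. It assumes the vendored github.com/hashicorp/hcl package; the Config type is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type: a list literal whose elements are
// objects, written with a trailing comma.
type Config struct {
	Foo []map[string]string `hcl:"foo"`
}

func main() {
	input := `
foo = [
  {somekey1 = "someval1"},
  {somekey2 = "someval2"},
]
`
	var c Config
	if err := hcl.Decode(&c, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c.Foo)
}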
51 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go generated vendored
@@ -74,14 +74,6 @@ func (s *Scanner) next() rune {
 		return eof
 	}

-	if ch == utf8.RuneError && size == 1 {
-		s.srcPos.Column++
-		s.srcPos.Offset += size
-		s.lastCharLen = size
-		s.err("illegal UTF-8 encoding")
-		return ch
-	}
-
 	// remember last position
 	s.prevPos = s.srcPos

@@ -89,12 +81,27 @@ func (s *Scanner) next() rune {
 	s.lastCharLen = size
 	s.srcPos.Offset += size

+	if ch == utf8.RuneError && size == 1 {
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
 	if ch == '\n' {
 		s.srcPos.Line++
 		s.lastLineLen = s.srcPos.Column
 		s.srcPos.Column = 0
 	}

+	if ch == '\x00' {
+		s.err("unexpected null character (0x00)")
+		return eof
+	}
+
+	if ch == '\uE123' {
+		s.err("unicode code point U+E123 reserved for internal use")
+		return utf8.RuneError
+	}
+
 	// debug
 	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
 	return ch
@@ -224,6 +231,11 @@ func (s *Scanner) Scan() token.Token {
 func (s *Scanner) scanComment(ch rune) {
 	// single line comments
 	if ch == '#' || (ch == '/' && s.peek() != '*') {
+		if ch == '/' && s.peek() != '/' {
+			s.err("expected '/' for comment")
+			return
+		}
+
 		ch = s.next()
 		for ch != '\n' && ch >= 0 && ch != eof {
 			ch = s.next()
@@ -340,7 +352,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type {
 	return token.NUMBER
 }

-// scanMantissa scans the mantissa begining from the rune. It returns the next
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
 // non decimal rune. It's used to determine wheter it's a fraction or exponent.
 func (s *Scanner) scanMantissa(ch rune) rune {
 	scanned := false
@@ -421,16 +433,16 @@ func (s *Scanner) scanHeredoc() {

 	// Read the identifier
 	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
-	if len(identBytes) == 0 {
+	if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
 		s.err("zero-length heredoc anchor")
 		return
 	}

 	var identRegexp *regexp.Regexp
 	if identBytes[0] == '-' {
-		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
 	} else {
-		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
 	}

 	// Read the actual string value
@@ -469,7 +481,7 @@ func (s *Scanner) scanString() {
 		// read character after quote
 		ch := s.next()

-		if ch == '\n' || ch < 0 || ch == eof {
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
 			s.err("literal not terminated")
 			return
 		}
@@ -525,16 +537,27 @@ func (s *Scanner) scanEscape() rune {
 // scanDigits scans a rune with the given base for n times. For example an
 // octal notation \184 would yield in scanDigits(ch, 8, 3)
 func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
 	for n > 0 && digitVal(ch) < base {
 		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
 		n--
 	}
 	if n > 0 {
 		s.err("illegal char escape")
 	}

-	// we scanned all digits, put the last non digit char back
-	s.unread()
+	if n != start && ch != eof {
+		// we scanned all digits, put the last non digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+
 	return ch
 }
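A small sketch touching the scanner's heredoc handling (the anchor regexp above is now anchored and tolerates a trailing carriage return). It assumes the vendored github.com/hashicorp/hcl package; the Doc type is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Doc is a hypothetical target for a heredoc string value.
type Doc struct {
	Message string `hcl:"message"`
}

func main() {
	// A heredoc literal; with the change above the closing anchor may also be
	// followed by a carriage return (Windows line endings).
	input := "message = <<EOF\nhello\nworld\nEOF\n"

	var d Doc
	if err := hcl.Decode(&d, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", d.Message)
}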
16 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go generated vendored
@@ -27,7 +27,7 @@ func Unquote(s string) (t string, err error) {
 	if quote != '"' {
 		return "", ErrSyntax
 	}
-	if contains(s, '\n') {
+	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
 		return "", ErrSyntax
 	}

@@ -49,7 +49,7 @@ func Unquote(s string) (t string, err error) {
 	for len(s) > 0 {
 		// If we're starting a '${}' then let it through un-unquoted.
 		// Specifically: we don't unquote any characters within the `${}`
-		// section, except for escaped quotes, which we handle specifically.
+		// section.
 		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
 			buf = append(buf, '$', '{')
 			s = s[2:]
@@ -64,14 +64,6 @@ func Unquote(s string) (t string, err error) {

 			s = s[size:]

-			// We special case escaped double quotes in interpolations, converting
-			// them to straight double quotes.
-			if r == '\\' {
-				if q, _ := utf8.DecodeRuneInString(s); q == '"' {
-					continue
-				}
-			}
-
 			n := utf8.EncodeRune(runeTmp[:], r)
 			buf = append(buf, runeTmp[:n]...)

@@ -95,6 +87,10 @@ func Unquote(s string) (t string, err error) {
 			}
 		}

+		if s[0] == '\n' {
+			return "", ErrSyntax
+		}
+
 		c, multibyte, ss, err := unquoteChar(s, quote)
 		if err != nil {
 			return "", err
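A rough sketch of the relaxed Unquote behaviour above, calling the vendored hcl/strconv package directly; the quoted input string is purely illustrative:

package main

import (
	"fmt"
	"log"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// The ${ ... } body is copied through verbatim, so with the change above a
	// newline inside the interpolation no longer trips the ErrSyntax check.
	quoted := "\"${join(var.list,\n  var.sep)}\""

	out, err := hclstrconv.Unquote(quoted)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}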
5 vendor/github.com/hashicorp/hcl/hcl/token/token.go generated vendored
@@ -152,6 +152,11 @@ func (t Token) Value() interface{} {
 		f = strconv.Unquote
 	}

+	// This case occurs if json null is used
+	if t.Text == "" {
+		return ""
+	}
+
 	v, err := f(t.Text)
 	if err != nil {
 		panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
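A tiny sketch of the Token.Value change above (an empty Text, as produced for a JSON null, now returns an empty string instead of reaching the unquote panic), using only the vendored hcl/token package:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	// A STRING token with empty text, e.g. translated from a JSON null.
	tok := token.Token{Type: token.STRING, Text: ""}

	// With the change above this returns "" rather than panicking inside the
	// unquote step.
	fmt.Printf("%q\n", tok.Value())
}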
6 vendor/github.com/hashicorp/hcl/json/parser/flatten.go generated vendored
@@ -48,6 +48,12 @@ func flattenListType(
 	item *ast.ObjectItem,
 	items []*ast.ObjectItem,
 	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list is empty, keep the original list
+	if len(ot.List) == 0 {
+		items = append(items, item)
+		return items, frontier
+	}
+
 	// All the elements of this object must also be objects!
 	for _, subitem := range ot.List {
 		if _, ok := subitem.(*ast.ObjectType); !ok {
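A short, hedged sketch of the flatten guard above: an empty JSON array now survives flattening and decodes to an empty list. It assumes the vendored github.com/hashicorp/hcl package; Config is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type with a list-valued key.
type Config struct {
	Tags []string `hcl:"tags"`
}

func main() {
	// JSON input with an empty array; the flatten step above now keeps the item.
	input := `{"tags": []}`

	var c Config
	if err := hcl.Decode(&c, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", c.Tags)
}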
18 vendor/github.com/hashicorp/hcl/json/parser/parser.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"fmt"

 	"github.com/hashicorp/hcl/hcl/ast"
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
 	"github.com/hashicorp/hcl/json/scanner"
 	"github.com/hashicorp/hcl/json/token"
 )
@@ -85,6 +86,7 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
 			break
 		}
 	}
+
 	return node, nil
 }

@@ -103,6 +105,14 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {

 	switch p.tok.Type {
 	case token.COLON:
+		pos := p.tok.Pos
+		o.Assign = hcltoken.Pos{
+			Filename: pos.Filename,
+			Offset:   pos.Offset,
+			Line:     pos.Line,
+			Column:   pos.Column,
+		}
+
 		o.Val, err = p.objectValue()
 		if err != nil {
 			return nil, err
@@ -128,10 +138,16 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
 			Token: p.tok.HCLToken(),
 		})
 	case token.COLON:
+		// If we have a zero keycount it means that we never got
+		// an object key, i.e. `{ :`. This is a syntax error.
+		if keyCount == 0 {
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+
 		// Done
 		return keys, nil
 	case token.ILLEGAL:
-		fmt.Println("illegal")
+		return nil, errors.New("illegal")
 	default:
 		return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
 	}
4 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go generated vendored
@@ -246,7 +246,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type {
 	return token.NUMBER
 }

-// scanMantissa scans the mantissa begining from the rune. It returns the next
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
 // non decimal rune. It's used to determine wheter it's a fraction or exponent.
 func (s *Scanner) scanMantissa(ch rune) rune {
 	scanned := false
@@ -296,7 +296,7 @@ func (s *Scanner) scanString() {
 		return
 	}

-	if ch == '"' && braces == 0 {
+	if ch == '"' {
 		break
 	}
Some files were not shown because too many files have changed in this diff.