Fix: New dependencies were missing in vendoring
Signed-off-by: Knut Ahlers <knut@ahlers.me>
parent 519a98dc90
commit 438179c28e
163 changed files with 46582 additions and 2 deletions
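For context: a vendoring fix like this is normally produced by re-running the godep tool (the `GodepVersion` field below refers to it) after adding the new imports, rather than by editing `Godeps/Godeps.json` or `vendor/` by hand. A minimal sketch, assuming the godep CLI is installed and the repository sits in a GOPATH workspace:

```bash
# Sketch only: fetch the newly imported packages into GOPATH, then let
# godep rewrite Godeps/Godeps.json and copy the sources into vendor/.
go get ./...
godep save ./...
```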

119 Godeps/Godeps.json (generated)
@@ -1,7 +1,7 @@
 {
   "ImportPath": "github.com/Luzifer/korvike",
-  "GoVersion": "go1.7",
-  "GodepVersion": "v78",
+  "GoVersion": "go1.8",
+  "GodepVersion": "v79",
   "Deps": [
     {
       "ImportPath": "github.com/Luzifer/go_helpers/env",
@@ -13,10 +13,125 @@
      "Comment": "v1.1.0",
      "Rev": "c27bd3a64b5b19556914d9fec69922cf3852d585"
    },
    {
      "ImportPath": "github.com/fatih/structs",
      "Rev": "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2"
    },
    {
      "ImportPath": "github.com/hashicorp/errwrap",
      "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
    },
    {
      "ImportPath": "github.com/hashicorp/go-cleanhttp",
      "Rev": "ad28ea4487f05916463e2423a55166280e8254b5"
    },
    {
      "ImportPath": "github.com/hashicorp/go-multierror",
      "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
    },
    {
      "ImportPath": "github.com/hashicorp/go-rootcerts",
      "Rev": "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/ast",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/parser",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/hcl/token",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/json/parser",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/json/scanner",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/hcl/json/token",
      "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
    },
    {
      "ImportPath": "github.com/hashicorp/vault/api",
      "Comment": "v0.7.0-190-g4490e93",
      "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
    },
    {
      "ImportPath": "github.com/hashicorp/vault/helper/compressutil",
      "Comment": "v0.7.0-190-g4490e93",
      "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
    },
    {
      "ImportPath": "github.com/hashicorp/vault/helper/jsonutil",
      "Comment": "v0.7.0-190-g4490e93",
      "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
    },
    {
      "ImportPath": "github.com/mitchellh/go-homedir",
      "Rev": "981ab348d865cf048eb7d17e78ac7192632d8415"
    },
    {
      "ImportPath": "github.com/mitchellh/mapstructure",
      "Rev": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1"
    },
    {
      "ImportPath": "github.com/sethgrid/pester",
      "Rev": "4f4c0a67b6496764028e1ab9fd8dfb630282ed2f"
    },
    {
      "ImportPath": "github.com/spf13/pflag",
      "Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
    },
    {
      "ImportPath": "golang.org/x/net/http2",
      "Rev": "dfe83d419c9403b40b19d08cdba2afec27b002f7"
    },
    {
      "ImportPath": "golang.org/x/net/http2/hpack",
      "Rev": "dfe83d419c9403b40b19d08cdba2afec27b002f7"
    },
    {
      "ImportPath": "golang.org/x/net/idna",
      "Rev": "dfe83d419c9403b40b19d08cdba2afec27b002f7"
    },
    {
      "ImportPath": "golang.org/x/net/lex/httplex",
      "Rev": "dfe83d419c9403b40b19d08cdba2afec27b002f7"
    },
    {
      "ImportPath": "golang.org/x/text/secure/bidirule",
      "Rev": "3491b61b9edc56653ad4333e605e2908e46a036b"
    },
    {
      "ImportPath": "golang.org/x/text/transform",
      "Rev": "3491b61b9edc56653ad4333e605e2908e46a036b"
    },
    {
      "ImportPath": "golang.org/x/text/unicode/bidi",
      "Rev": "3491b61b9edc56653ad4333e605e2908e46a036b"
    },
    {
      "ImportPath": "golang.org/x/text/unicode/norm",
      "Rev": "3491b61b9edc56653ad4333e605e2908e46a036b"
    },
    {
      "ImportPath": "gopkg.in/yaml.v2",
      "Rev": "31c299268d302dd0aa9a0dcf765a3d58971ac83f"

23 vendor/github.com/fatih/structs/.gitignore (generated, vendored, new file)
@@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test

11 vendor/github.com/fatih/structs/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,11 @@
language: go
|
||||
go:
|
||||
- 1.6
|
||||
- tip
|
||||
sudo: false
|
||||
before_install:
|
||||
- go get github.com/axw/gocov/gocov
|
||||
- go get github.com/mattn/goveralls
|
||||
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
|
||||
script:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci

21 vendor/github.com/fatih/structs/LICENSE (generated, vendored, new file)
@@ -0,0 +1,21 @@
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Fatih Arslan
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.

163 vendor/github.com/fatih/structs/README.md (generated, vendored, new file)
@@ -0,0 +1,163 @@
# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
|
||||
|
||||
Structs contains various utilities to work with Go (Golang) structs. It was
|
||||
initially used by me to convert a struct into a `map[string]interface{}`. With
|
||||
time I've added other utilities for structs. It's basically a high level
|
||||
package based on primitives from the reflect package. Feel free to add new
|
||||
functions or improve the existing code.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/fatih/structs
|
||||
```
|
||||
|
||||
## Usage and Examples
|
||||
|
||||
Just like the standard lib `strings`, `bytes` and co packages, `structs` has
|
||||
many global functions to manipulate or organize your struct data. Lets define
|
||||
and declare a struct:
|
||||
|
||||
```go
|
||||
type Server struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
ID int
|
||||
Enabled bool
|
||||
users []string // not exported
|
||||
http.Server // embedded
|
||||
}
|
||||
|
||||
server := &Server{
|
||||
Name: "gopher",
|
||||
ID: 123456,
|
||||
Enabled: true,
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
// Convert a struct to a map[string]interface{}
|
||||
// => {"Name":"gopher", "ID":123456, "Enabled":true}
|
||||
m := structs.Map(server)
|
||||
|
||||
// Convert the values of a struct to a []interface{}
|
||||
// => ["gopher", 123456, true]
|
||||
v := structs.Values(server)
|
||||
|
||||
// Convert the names of a struct to a []string
|
||||
// (see "Names methods" for more info about fields)
|
||||
n := structs.Names(server)
|
||||
|
||||
// Convert the values of a struct to a []*Field
|
||||
// (see "Field methods" for more info about fields)
|
||||
f := structs.Fields(server)
|
||||
|
||||
// Return the struct name => "Server"
|
||||
n := structs.Name(server)
|
||||
|
||||
// Check if any field of a struct is initialized or not.
|
||||
h := structs.HasZero(server)
|
||||
|
||||
// Check if all fields of a struct is initialized or not.
|
||||
z := structs.IsZero(server)
|
||||
|
||||
// Check if server is a struct or a pointer to struct
|
||||
i := structs.IsStruct(server)
|
||||
```
|
||||
|
||||
### Struct methods
|
||||
|
||||
The structs functions can be also used as independent methods by creating a new
|
||||
`*structs.Struct`. This is handy if you want to have more control over the
|
||||
structs (such as retrieving a single Field).
|
||||
|
||||
```go
|
||||
// Create a new struct type:
|
||||
s := structs.New(server)
|
||||
|
||||
m := s.Map() // Get a map[string]interface{}
|
||||
v := s.Values() // Get a []interface{}
|
||||
f := s.Fields() // Get a []*Field
|
||||
n := s.Names() // Get a []string
|
||||
f := s.Field(name) // Get a *Field based on the given field name
|
||||
f, ok := s.FieldOk(name) // Get a *Field based on the given field name
|
||||
n := s.Name() // Get the struct name
|
||||
h := s.HasZero() // Check if any field is initialized
|
||||
z := s.IsZero() // Check if all fields are initialized
|
||||
```
|
||||
|
||||
### Field methods
|
||||
|
||||
We can easily examine a single Field for more detail. Below you can see how we
|
||||
get and interact with various field methods:
|
||||
|
||||
|
||||
```go
|
||||
s := structs.New(server)
|
||||
|
||||
// Get the Field struct for the "Name" field
|
||||
name := s.Field("Name")
|
||||
|
||||
// Get the underlying value, value => "gopher"
|
||||
value := name.Value().(string)
|
||||
|
||||
// Set the field's value
|
||||
name.Set("another gopher")
|
||||
|
||||
// Get the field's kind, kind => "string"
|
||||
name.Kind()
|
||||
|
||||
// Check if the field is exported or not
|
||||
if name.IsExported() {
|
||||
fmt.Println("Name field is exported")
|
||||
}
|
||||
|
||||
// Check if the value is a zero value, such as "" for string, 0 for int
|
||||
if !name.IsZero() {
|
||||
fmt.Println("Name is initialized")
|
||||
}
|
||||
|
||||
// Check if the field is an anonymous (embedded) field
|
||||
if !name.IsEmbedded() {
|
||||
fmt.Println("Name is not an embedded field")
|
||||
}
|
||||
|
||||
// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
|
||||
tagValue := name.Tag("json")
|
||||
```
|
||||
|
||||
Nested structs are supported too:
|
||||
|
||||
```go
|
||||
addrField := s.Field("Server").Field("Addr")
|
||||
|
||||
// Get the value for addr
|
||||
a := addrField.Value().(string)
|
||||
|
||||
// Or get all fields
|
||||
httpServer := s.Field("Server").Fields()
|
||||
```
|
||||
|
||||
We can also get a slice of Fields from the Struct type to iterate over all
|
||||
fields. This is handy if you wish to examine all fields:
|
||||
|
||||
```go
|
||||
s := structs.New(server)
|
||||
|
||||
for _, f := range s.Fields() {
|
||||
fmt.Printf("field name: %+v\n", f.Name())
|
||||
|
||||
if f.IsExported() {
|
||||
fmt.Printf("value : %+v\n", f.Value())
|
||||
fmt.Printf("is zero : %+v\n", f.IsZero())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Credits
|
||||
|
||||
* [Fatih Arslan](https://github.com/fatih)
|
||||
* [Cihangir Savas](https://github.com/cihangir)
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT) - see LICENSE.md for more details

132 vendor/github.com/fatih/structs/field.go (generated, vendored, new file)
@@ -0,0 +1,132 @@
package structs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
errNotExported = errors.New("field is not exported")
|
||||
errNotSettable = errors.New("field is not settable")
|
||||
)
|
||||
|
||||
// Field represents a single struct field that encapsulates high level
|
||||
// functions around the field.
|
||||
type Field struct {
|
||||
value reflect.Value
|
||||
field reflect.StructField
|
||||
defaultTag string
|
||||
}
|
||||
|
||||
// Tag returns the value associated with key in the tag string. If there is no
|
||||
// such key in the tag, Tag returns the empty string.
|
||||
func (f *Field) Tag(key string) string {
|
||||
return f.field.Tag.Get(key)
|
||||
}
|
||||
|
||||
// Value returns the underlying value of the field. It panics if the field
|
||||
// is not exported.
|
||||
func (f *Field) Value() interface{} {
|
||||
return f.value.Interface()
|
||||
}
|
||||
|
||||
// IsEmbedded returns true if the given field is an anonymous field (embedded)
|
||||
func (f *Field) IsEmbedded() bool {
|
||||
return f.field.Anonymous
|
||||
}
|
||||
|
||||
// IsExported returns true if the given field is exported.
|
||||
func (f *Field) IsExported() bool {
|
||||
return f.field.PkgPath == ""
|
||||
}
|
||||
|
||||
// IsZero returns true if the given field is not initialized (has a zero value).
|
||||
// It panics if the field is not exported.
|
||||
func (f *Field) IsZero() bool {
|
||||
zero := reflect.Zero(f.value.Type()).Interface()
|
||||
current := f.Value()
|
||||
|
||||
return reflect.DeepEqual(current, zero)
|
||||
}
|
||||
|
||||
// Name returns the name of the given field
|
||||
func (f *Field) Name() string {
|
||||
return f.field.Name
|
||||
}
|
||||
|
||||
// Kind returns the fields kind, such as "string", "map", "bool", etc ..
|
||||
func (f *Field) Kind() reflect.Kind {
|
||||
return f.value.Kind()
|
||||
}
|
||||
|
||||
// Set sets the field to given value v. It returns an error if the field is not
|
||||
// settable (not addressable or not exported) or if the given value's type
|
||||
// doesn't match the fields type.
|
||||
func (f *Field) Set(val interface{}) error {
|
||||
// we can't set unexported fields, so be sure this field is exported
|
||||
if !f.IsExported() {
|
||||
return errNotExported
|
||||
}
|
||||
|
||||
// do we get here? not sure...
|
||||
if !f.value.CanSet() {
|
||||
return errNotSettable
|
||||
}
|
||||
|
||||
given := reflect.ValueOf(val)
|
||||
|
||||
if f.value.Kind() != given.Kind() {
|
||||
return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
|
||||
}
|
||||
|
||||
f.value.Set(given)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Zero sets the field to its zero value. It returns an error if the field is not
|
||||
// settable (not addressable or not exported).
|
||||
func (f *Field) Zero() error {
|
||||
zero := reflect.Zero(f.value.Type()).Interface()
|
||||
return f.Set(zero)
|
||||
}
|
||||
|
||||
// Fields returns a slice of Fields. This is particular handy to get the fields
|
||||
// of a nested struct . A struct tag with the content of "-" ignores the
|
||||
// checking of that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field *http.Request `structs:"-"`
|
||||
//
|
||||
// It panics if field is not exported or if field's kind is not struct
|
||||
func (f *Field) Fields() []*Field {
|
||||
return getFields(f.value, f.defaultTag)
|
||||
}
|
||||
|
||||
// Field returns the field from a nested struct. It panics if the nested struct
|
||||
// is not exported or if the field was not found.
|
||||
func (f *Field) Field(name string) *Field {
|
||||
field, ok := f.FieldOk(name)
|
||||
if !ok {
|
||||
panic("field not found")
|
||||
}
|
||||
|
||||
return field
|
||||
}
|
||||
|
||||
// FieldOk returns the field from a nested struct. The boolean returns whether
|
||||
// the field was found (true) or not (false).
|
||||
func (f *Field) FieldOk(name string) (*Field, bool) {
|
||||
v := strctVal(f.value.Interface())
|
||||
t := v.Type()
|
||||
|
||||
field, ok := t.FieldByName(name)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &Field{
|
||||
field: field,
|
||||
value: v.FieldByName(name),
|
||||
}, true
|
||||
}

507 vendor/github.com/fatih/structs/structs.go (generated, vendored, new file)
@@ -0,0 +1,507 @@
// Package structs contains various utilities functions to work with structs.
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultTagName is the default tag name for struct fields which provides
|
||||
// a more granular to tweak certain structs. Lookup the necessary functions
|
||||
// for more info.
|
||||
DefaultTagName = "structs" // struct's field default tag name
|
||||
)
|
||||
|
||||
// Struct encapsulates a struct type to provide several high level functions
|
||||
// around the struct.
|
||||
type Struct struct {
|
||||
raw interface{}
|
||||
value reflect.Value
|
||||
TagName string
|
||||
}
|
||||
|
||||
// New returns a new *Struct with the struct s. It panics if the s's kind is
|
||||
// not struct.
|
||||
func New(s interface{}) *Struct {
|
||||
return &Struct{
|
||||
raw: s,
|
||||
value: strctVal(s),
|
||||
TagName: DefaultTagName,
|
||||
}
|
||||
}
|
||||
|
||||
// Map converts the given struct to a map[string]interface{}, where the keys
|
||||
// of the map are the field names and the values of the map the associated
|
||||
// values of the fields. The default key string is the struct field name but
|
||||
// can be changed in the struct field's tag value. The "structs" key in the
|
||||
// struct's field tag value is the key name. Example:
|
||||
//
|
||||
// // Field appears in map as key "myName".
|
||||
// Name string `structs:"myName"`
|
||||
//
|
||||
// A tag value with the content of "-" ignores that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// A tag value with the content of "string" uses the stringer to get the value. Example:
|
||||
//
|
||||
// // The value will be output of Animal's String() func.
|
||||
// // Map will panic if Animal does not implement String().
|
||||
// Field *Animal `structs:"field,string"`
|
||||
//
|
||||
// A tag value with the option of "flatten" used in a struct field is to flatten its fields
|
||||
// in the output map. Example:
|
||||
//
|
||||
// // The FieldStruct's fields will be flattened into the output map.
|
||||
// FieldStruct time.Time `structs:"flatten"`
|
||||
//
|
||||
// A tag value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Field is not processed further by this package.
|
||||
// Field time.Time `structs:"myName,omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// A tag value with the option of "omitempty" ignores that particular field if
|
||||
// the field value is empty. Example:
|
||||
//
|
||||
// // Field appears in map as key "myName", but the field is
|
||||
// // skipped if empty.
|
||||
// Field string `structs:"myName,omitempty"`
|
||||
//
|
||||
// // Field appears in map as key "Field" (the default), but
|
||||
// // the field is skipped if empty.
|
||||
// Field string `structs:",omitempty"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected.
|
||||
func (s *Struct) Map() map[string]interface{} {
|
||||
out := make(map[string]interface{})
|
||||
s.FillMap(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// FillMap is the same as Map. Instead of returning the output, it fills the
|
||||
// given map.
|
||||
func (s *Struct) FillMap(out map[string]interface{}) {
|
||||
if out == nil {
|
||||
return
|
||||
}
|
||||
|
||||
fields := s.structFields()
|
||||
|
||||
for _, field := range fields {
|
||||
name := field.Name
|
||||
val := s.value.FieldByName(name)
|
||||
isSubStruct := false
|
||||
var finalVal interface{}
|
||||
|
||||
tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
if tagName != "" {
|
||||
name = tagName
|
||||
}
|
||||
|
||||
// if the value is a zero value and the field is marked as omitempty do
|
||||
// not include
|
||||
if tagOpts.Has("omitempty") {
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
current := val.Interface()
|
||||
|
||||
if reflect.DeepEqual(current, zero) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
// look out for embedded structs, and convert them to a
|
||||
// map[string]interface{} too
|
||||
n := New(val.Interface())
|
||||
n.TagName = s.TagName
|
||||
m := n.Map()
|
||||
isSubStruct = true
|
||||
if len(m) == 0 {
|
||||
finalVal = val.Interface()
|
||||
} else {
|
||||
finalVal = m
|
||||
}
|
||||
} else {
|
||||
finalVal = val.Interface()
|
||||
}
|
||||
|
||||
if tagOpts.Has("string") {
|
||||
s, ok := val.Interface().(fmt.Stringer)
|
||||
if ok {
|
||||
out[name] = s.String()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if isSubStruct && (tagOpts.Has("flatten")) {
|
||||
for k := range finalVal.(map[string]interface{}) {
|
||||
out[k] = finalVal.(map[string]interface{})[k]
|
||||
}
|
||||
} else {
|
||||
out[name] = finalVal
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Values converts the given s struct's field values to a []interface{}. A
|
||||
// struct tag with the content of "-" ignores the that particular field.
|
||||
// Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field int `structs:"-"`
|
||||
//
|
||||
// A value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Fields is not processed further by this package.
|
||||
// Field time.Time `structs:",omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// A tag value with the option of "omitempty" ignores that particular field and
|
||||
// is not added to the values if the field value is empty. Example:
|
||||
//
|
||||
// // Field is skipped if empty
|
||||
// Field string `structs:",omitempty"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected.
|
||||
func (s *Struct) Values() []interface{} {
|
||||
fields := s.structFields()
|
||||
|
||||
var t []interface{}
|
||||
|
||||
for _, field := range fields {
|
||||
val := s.value.FieldByName(field.Name)
|
||||
|
||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
|
||||
// if the value is a zero value and the field is marked as omitempty do
|
||||
// not include
|
||||
if tagOpts.Has("omitempty") {
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
current := val.Interface()
|
||||
|
||||
if reflect.DeepEqual(current, zero) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tagOpts.Has("string") {
|
||||
s, ok := val.Interface().(fmt.Stringer)
|
||||
if ok {
|
||||
t = append(t, s.String())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
// look out for embedded structs, and convert them to a
|
||||
// []interface{} to be added to the final values slice
|
||||
for _, embeddedVal := range Values(val.Interface()) {
|
||||
t = append(t, embeddedVal)
|
||||
}
|
||||
} else {
|
||||
t = append(t, val.Interface())
|
||||
}
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// Fields returns a slice of Fields. A struct tag with the content of "-"
|
||||
// ignores the checking of that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// It panics if s's kind is not struct.
|
||||
func (s *Struct) Fields() []*Field {
|
||||
return getFields(s.value, s.TagName)
|
||||
}
|
||||
|
||||
// Names returns a slice of field names. A struct tag with the content of "-"
|
||||
// ignores the checking of that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// It panics if s's kind is not struct.
|
||||
func (s *Struct) Names() []string {
|
||||
fields := getFields(s.value, s.TagName)
|
||||
|
||||
names := make([]string, len(fields))
|
||||
|
||||
for i, field := range fields {
|
||||
names[i] = field.Name()
|
||||
}
|
||||
|
||||
return names
|
||||
}
|
||||
|
||||
func getFields(v reflect.Value, tagName string) []*Field {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
t := v.Type()
|
||||
|
||||
var fields []*Field
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
|
||||
if tag := field.Tag.Get(tagName); tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
f := &Field{
|
||||
field: field,
|
||||
value: v.FieldByName(field.Name),
|
||||
}
|
||||
|
||||
fields = append(fields, f)
|
||||
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// Field returns a new Field struct that provides several high level functions
|
||||
// around a single struct field entity. It panics if the field is not found.
|
||||
func (s *Struct) Field(name string) *Field {
|
||||
f, ok := s.FieldOk(name)
|
||||
if !ok {
|
||||
panic("field not found")
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// FieldOk returns a new Field struct that provides several high level functions
|
||||
// around a single struct field entity. The boolean returns true if the field
|
||||
// was found.
|
||||
func (s *Struct) FieldOk(name string) (*Field, bool) {
|
||||
t := s.value.Type()
|
||||
|
||||
field, ok := t.FieldByName(name)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &Field{
|
||||
field: field,
|
||||
value: s.value.FieldByName(name),
|
||||
defaultTag: s.TagName,
|
||||
}, true
|
||||
}
|
||||
|
||||
// IsZero returns true if all fields in a struct is a zero value (not
|
||||
// initialized) A struct tag with the content of "-" ignores the checking of
|
||||
// that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// A value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Field is not processed further by this package.
|
||||
// Field time.Time `structs:"myName,omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected. It panics if s's kind is not struct.
|
||||
func (s *Struct) IsZero() bool {
|
||||
fields := s.structFields()
|
||||
|
||||
for _, field := range fields {
|
||||
val := s.value.FieldByName(field.Name)
|
||||
|
||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
ok := IsZero(val.Interface())
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// zero value of the given field, such as "" for string, 0 for int
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
|
||||
// current value of the given field
|
||||
current := val.Interface()
|
||||
|
||||
if !reflect.DeepEqual(current, zero) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// HasZero returns true if a field in a struct is not initialized (zero value).
|
||||
// A struct tag with the content of "-" ignores the checking of that particular
|
||||
// field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// A value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Field is not processed further by this package.
|
||||
// Field time.Time `structs:"myName,omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected. It panics if s's kind is not struct.
|
||||
func (s *Struct) HasZero() bool {
|
||||
fields := s.structFields()
|
||||
|
||||
for _, field := range fields {
|
||||
val := s.value.FieldByName(field.Name)
|
||||
|
||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
ok := HasZero(val.Interface())
|
||||
if ok {
|
||||
return true
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// zero value of the given field, such as "" for string, 0 for int
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
|
||||
// current value of the given field
|
||||
current := val.Interface()
|
||||
|
||||
if reflect.DeepEqual(current, zero) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Name returns the structs's type name within its package. For more info refer
|
||||
// to Name() function.
|
||||
func (s *Struct) Name() string {
|
||||
return s.value.Type().Name()
|
||||
}
|
||||
|
||||
// structFields returns the exported struct fields for a given s struct. This
|
||||
// is a convenient helper method to avoid duplicate code in some of the
|
||||
// functions.
|
||||
func (s *Struct) structFields() []reflect.StructField {
|
||||
t := s.value.Type()
|
||||
|
||||
var f []reflect.StructField
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
// we can't access the value of unexported fields
|
||||
if field.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// don't check if it's omitted
|
||||
if tag := field.Tag.Get(s.TagName); tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
f = append(f, field)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func strctVal(s interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(s)
|
||||
|
||||
// if pointer get the underlying element≤
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Struct {
|
||||
panic("not struct")
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Map converts the given struct to a map[string]interface{}. For more info
|
||||
// refer to Struct types Map() method. It panics if s's kind is not struct.
|
||||
func Map(s interface{}) map[string]interface{} {
|
||||
return New(s).Map()
|
||||
}
|
||||
|
||||
// FillMap is the same as Map. Instead of returning the output, it fills the
|
||||
// given map.
|
||||
func FillMap(s interface{}, out map[string]interface{}) {
|
||||
New(s).FillMap(out)
|
||||
}
|
||||
|
||||
// Values converts the given struct to a []interface{}. For more info refer to
|
||||
// Struct types Values() method. It panics if s's kind is not struct.
|
||||
func Values(s interface{}) []interface{} {
|
||||
return New(s).Values()
|
||||
}
|
||||
|
||||
// Fields returns a slice of *Field. For more info refer to Struct types
|
||||
// Fields() method. It panics if s's kind is not struct.
|
||||
func Fields(s interface{}) []*Field {
|
||||
return New(s).Fields()
|
||||
}
|
||||
|
||||
// Names returns a slice of field names. For more info refer to Struct types
|
||||
// Names() method. It panics if s's kind is not struct.
|
||||
func Names(s interface{}) []string {
|
||||
return New(s).Names()
|
||||
}
|
||||
|
||||
// IsZero returns true if all fields is equal to a zero value. For more info
|
||||
// refer to Struct types IsZero() method. It panics if s's kind is not struct.
|
||||
func IsZero(s interface{}) bool {
|
||||
return New(s).IsZero()
|
||||
}
|
||||
|
||||
// HasZero returns true if any field is equal to a zero value. For more info
|
||||
// refer to Struct types HasZero() method. It panics if s's kind is not struct.
|
||||
func HasZero(s interface{}) bool {
|
||||
return New(s).HasZero()
|
||||
}
|
||||
|
||||
// IsStruct returns true if the given variable is a struct or a pointer to
|
||||
// struct.
|
||||
func IsStruct(s interface{}) bool {
|
||||
v := reflect.ValueOf(s)
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
// uninitialized zero value of a struct
|
||||
if v.Kind() == reflect.Invalid {
|
||||
return false
|
||||
}
|
||||
|
||||
return v.Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
// Name returns the structs's type name within its package. It returns an
|
||||
// empty string for unnamed types. It panics if s's kind is not struct.
|
||||
func Name(s interface{}) string {
|
||||
return New(s).Name()
|
||||
}

32 vendor/github.com/fatih/structs/tags.go (generated, vendored, new file)
@@ -0,0 +1,32 @@
package structs
|
||||
|
||||
import "strings"
|
||||
|
||||
// tagOptions contains a slice of tag options
|
||||
type tagOptions []string
|
||||
|
||||
// Has returns true if the given optiton is available in tagOptions
|
||||
func (t tagOptions) Has(opt string) bool {
|
||||
for _, tagOpt := range t {
|
||||
if tagOpt == opt {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// parseTag splits a struct field's tag into its name and a list of options
|
||||
// which comes after a name. A tag is in the form of: "name,option1,option2".
|
||||
// The name can be neglectected.
|
||||
func parseTag(tag string) (string, tagOptions) {
|
||||
// tag is one of followings:
|
||||
// ""
|
||||
// "name"
|
||||
// "name,opt"
|
||||
// "name,opt,opt2"
|
||||
// ",opt"
|
||||
|
||||
res := strings.Split(tag, ",")
|
||||
return res[0], res[1:]
|
||||
}

354 vendor/github.com/hashicorp/errwrap/LICENSE (generated, vendored, new file)
@@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||

89 vendor/github.com/hashicorp/errwrap/README.md (generated, vendored, new file)
@@ -0,0 +1,89 @@
# errwrap
|
||||
|
||||
`errwrap` is a package for Go that formalizes the pattern of wrapping errors
|
||||
and checking if an error contains another error.
|
||||
|
||||
There is a common pattern in Go of taking a returned `error` value and
|
||||
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
|
||||
with this pattern is that you completely lose the original `error` structure.
|
||||
|
||||
Arguably the _correct_ approach is that you should make a custom structure
|
||||
implementing the `error` interface, and have the original error as a field
|
||||
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
|
||||
This is a good approach, but you have to know the entire chain of possible
|
||||
rewrapping that happens, when you might just care about one.
|
||||
|
||||
`errwrap` formalizes this pattern (it doesn't matter what approach you use
|
||||
above) by giving a single interface for wrapping errors, checking if a specific
|
||||
error is wrapped, and extracting that error.
|
||||
|
||||
## Installation and Docs
|
||||
|
||||
Install using `go get github.com/hashicorp/errwrap`.
|
||||
|
||||
Full documentation is available at
|
||||
http://godoc.org/github.com/hashicorp/errwrap
|
||||
|
||||
## Usage
|
||||
|
||||
#### Basic Usage
|
||||
|
||||
Below is a very basic example of its usage:
|
||||
|
||||
```go
|
||||
// A function that always returns an error, but wraps it, like a real
|
||||
// function might.
|
||||
func tryOpen() error {
|
||||
_, err := os.Open("/i/dont/exist")
|
||||
if err != nil {
|
||||
return errwrap.Wrapf("Doesn't exist: {{err}}", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
err := tryOpen()
|
||||
|
||||
// We can use the Contains helpers to check if an error contains
|
||||
// another error. It is safe to do this with a nil error, or with
|
||||
// an error that doesn't even use the errwrap package.
|
||||
if errwrap.Contains(err, ErrNotExist) {
|
||||
// Do something
|
||||
}
|
||||
if errwrap.ContainsType(err, new(os.PathError)) {
|
||||
// Do something
|
||||
}
|
||||
|
||||
// Or we can use the associated `Get` functions to just extract
|
||||
// a specific error. This would return nil if that specific error doesn't
|
||||
// exist.
|
||||
perr := errwrap.GetType(err, new(os.PathError))
|
||||
}
|
||||
```
|
||||
|
||||
#### Custom Types
|
||||
|
||||
If you're already making custom types that properly wrap errors, then
|
||||
you can get all the functionality of `errwraps.Contains` and such by
|
||||
implementing the `Wrapper` interface with just one function. Example:
|
||||
|
||||
```go
|
||||
type AppError {
|
||||
Code ErrorCode
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *AppError) WrappedErrors() []error {
|
||||
return []error{e.Err}
|
||||
}
|
||||
```
|
||||
|
||||
Now this works:
|
||||
|
||||
```go
|
||||
err := &AppError{Err: fmt.Errorf("an error")}
|
||||
if errwrap.ContainsType(err, fmt.Errorf("")) {
|
||||
// This will work!
|
||||
}
|
||||
```

169 vendor/github.com/hashicorp/errwrap/errwrap.go (generated, vendored, new file)
@@ -0,0 +1,169 @@
// Package errwrap implements methods to formalize error wrapping in Go.
//
// All of the top-level functions that take an `error` are built to be able
// to take any error, not just wrapped errors. This allows you to use errwrap
// without having to type-check and type-cast everywhere.
package errwrap

import (
	"errors"
	"reflect"
	"strings"
)

// WalkFunc is the callback called for Walk.
type WalkFunc func(error)

// Wrapper is an interface that can be implemented by custom types to
// have all the Contains, Get, etc. functions in errwrap work.
//
// When Walk reaches a Wrapper, it will call the callback for every
// wrapped error in addition to the wrapper itself. Since all the top-level
// functions in errwrap use Walk, this means that all those functions work
// with your custom type.
type Wrapper interface {
	WrappedErrors() []error
}

// Wrap defines that outer wraps inner, returning an error type that
// can be cleanly used with the other methods in this package, such as
// Contains, GetAll, etc.
//
// This function won't modify the error message at all (the outer message
// will be used).
func Wrap(outer, inner error) error {
	return &wrappedError{
		Outer: outer,
		Inner: inner,
	}
}

// Wrapf wraps an error with a formatting message. This is similar to using
// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
// errors, you should replace it with this.
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
func Wrapf(format string, err error) error {
	outerMsg := "<nil>"
	if err != nil {
		outerMsg = err.Error()
	}

	outer := errors.New(strings.Replace(
		format, "{{err}}", outerMsg, -1))

	return Wrap(outer, err)
}

// Contains checks if the given error contains an error with the
// message msg. If err is not a wrapped error, this will always return
// false unless the error itself happens to match this msg.
func Contains(err error, msg string) bool {
	return len(GetAll(err, msg)) > 0
}

// ContainsType checks if the given error contains an error with
// the same concrete type as v. If err is not a wrapped error, this will
// check the err itself.
func ContainsType(err error, v interface{}) bool {
	return len(GetAllType(err, v)) > 0
}

// Get is the same as GetAll but returns the deepest matching error.
func Get(err error, msg string) error {
	es := GetAll(err, msg)
	if len(es) > 0 {
		return es[len(es)-1]
	}

	return nil
}

// GetType is the same as GetAllType but returns the deepest matching error.
func GetType(err error, v interface{}) error {
	es := GetAllType(err, v)
	if len(es) > 0 {
		return es[len(es)-1]
	}

	return nil
}

// GetAll gets all the errors that might be wrapped in err with the
// given message. The order of the errors is such that the outermost
// matching error (the most recent wrap) is index zero, and so on.
func GetAll(err error, msg string) []error {
	var result []error

	Walk(err, func(err error) {
		if err.Error() == msg {
			result = append(result, err)
		}
	})

	return result
}

// GetAllType gets all the errors that are the same type as v.
//
// The order of the return value is the same as described in GetAll.
func GetAllType(err error, v interface{}) []error {
	var result []error

	var search string
	if v != nil {
		search = reflect.TypeOf(v).String()
	}
	Walk(err, func(err error) {
		var needle string
		if err != nil {
			needle = reflect.TypeOf(err).String()
		}

		if needle == search {
			result = append(result, err)
		}
	})

	return result
}

// Walk walks all the wrapped errors in err and calls the callback. If
// err isn't a wrapped error, this will be called once for err. If err
// is a wrapped error, the callback will be called for both the wrapper
// that implements error as well as the wrapped error itself.
func Walk(err error, cb WalkFunc) {
	if err == nil {
		return
	}

	switch e := err.(type) {
	case *wrappedError:
		cb(e.Outer)
		Walk(e.Inner, cb)
	case Wrapper:
		cb(err)

		for _, err := range e.WrappedErrors() {
			Walk(err, cb)
		}
	default:
		cb(err)
	}
}

// wrappedError is an implementation of error that has both the
// outer and inner errors.
type wrappedError struct {
	Outer error
	Inner error
}

func (w *wrappedError) Error() string {
	return w.Outer.Error()
}

func (w *wrappedError) WrappedErrors() []error {
	return []error{w.Outer, w.Inner}
}

363 vendor/github.com/hashicorp/go-cleanhttp/LICENSE generated vendored Normal file
@@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|

30 vendor/github.com/hashicorp/go-cleanhttp/README.md generated vendored Normal file
@@ -0,0 +1,30 @@
# cleanhttp

Functions for accessing "clean" Go http.Client values

-------------

The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:

> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.

Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).

Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not simply enough to replace
`http.DefaultClient` with `&http.Client{}`.

This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.
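
As a minimal usage sketch (assuming the `cleanhttp` functions shown in cleanhttp.go below, with a hypothetical target URL and `log` imported), a one-off call and a reused client might look like this:

```go
// One-off request: DefaultClient disables keepalives, so no idle
// connections are left behind after the call.
resp, err := cleanhttp.DefaultClient().Get("https://example.com/health")
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()

// Repeated requests to the same host: DefaultPooledClient keeps a
// private connection pool instead of touching http.DefaultTransport.
pooled := cleanhttp.DefaultPooledClient()
resp2, err := pooled.Get("https://example.com/health")
if err != nil {
	log.Fatal(err)
}
defer resp2.Body.Close()
```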

53 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go generated vendored Normal file
@@ -0,0 +1,53 @@
package cleanhttp

import (
	"net"
	"net/http"
	"time"
)

// DefaultTransport returns a new http.Transport with the same default values
// as http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
	transport := DefaultPooledTransport()
	transport.DisableKeepAlives = true
	transport.MaxIdleConnsPerHost = -1
	return transport
}

// DefaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func DefaultPooledTransport() *http.Transport {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		DisableKeepAlives:   false,
		MaxIdleConnsPerHost: 1,
	}
	return transport
}

// DefaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func DefaultClient() *http.Client {
	return &http.Client{
		Transport: DefaultTransport(),
	}
}

// DefaultPooledClient returns a new http.Client with the same default values
// as http.Client, but with a shared Transport. Do not use this function
// for transient clients as it can leak file descriptors over time. Only use
// this for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
	return &http.Client{
		Transport: DefaultPooledTransport(),
	}
}

20 vendor/github.com/hashicorp/go-cleanhttp/doc.go generated vendored Normal file
@@ -0,0 +1,20 @@
// Package cleanhttp offers convenience utilities for acquiring "clean"
// http.Transport and http.Client structs.
//
// Values set on http.DefaultClient and http.DefaultTransport affect all
// callers. This can have detrimental effects, especially in TLS contexts,
// where client or root certificates set to talk to multiple endpoints can end
// up displacing each other, leading to hard-to-debug issues. This package
// provides non-shared http.Client and http.Transport structs to ensure that
// the configuration will not be overwritten by other parts of the application
// or dependencies.
//
// The DefaultClient and DefaultTransport functions disable idle connections
// and keepalives. Without ensuring that idle connections are closed before
// garbage collection, short-term clients/transports can leak file descriptors,
// eventually leading to "too many open files" errors. If you will be
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp

353 vendor/github.com/hashicorp/go-multierror/LICENSE generated vendored Normal file
@@ -0,0 +1,353 @@
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|

91 vendor/github.com/hashicorp/go-multierror/README.md generated vendored Normal file
@@ -0,0 +1,91 @@
# go-multierror

`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.

This allows a function in Go to return an `error` that might actually
be a list of errors. If the caller knows this, they can unwrap the
list and access the errors. If the caller doesn't know, the error
formats to a nice human-readable format.

`go-multierror` implements the
[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
be used with that library, as well.
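
As a minimal sketch of that interoperability (assuming `errors`, `github.com/hashicorp/errwrap`, and this package are imported), an accumulated multierror can be inspected with the `errwrap` helpers directly:

```go
var result error
result = multierror.Append(result, errors.New("disk full"))
result = multierror.Append(result, errors.New("network down"))

// *multierror.Error implements errwrap.Wrapper, so errwrap can walk
// the accumulated list without any type assertions.
if errwrap.Contains(result, "disk full") {
	// React to the disk error specifically.
}
```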

## Installation and Docs

Install using `go get github.com/hashicorp/go-multierror`.

Full documentation is available at
http://godoc.org/github.com/hashicorp/go-multierror

## Usage

go-multierror is easy to use and purposely built to be unobtrusive in
existing Go applications/libraries that may not be aware of it.

**Building a list of errors**

The `Append` function is used to create a list of errors. This function
behaves a lot like the Go built-in `append` function: it doesn't matter
if the first argument is nil, a `multierror.Error`, or any other `error`,
the function behaves as you would expect.

```go
var result error

if err := step1(); err != nil {
	result = multierror.Append(result, err)
}
if err := step2(); err != nil {
	result = multierror.Append(result, err)
}

return result
```

**Customizing the formatting of the errors**

By specifying a custom `ErrorFormat`, you can customize the format
of the `Error() string` function:

```go
var result *multierror.Error

// ... accumulate errors here, maybe using Append

if result != nil {
	result.ErrorFormat = func([]error) string {
		return "errors!"
	}
}
```

**Accessing the list of errors**

`multierror.Error` implements `error` so if the caller doesn't know about
multierror, it will work just fine. But if you're aware a multierror might
be returned, you can use type switches to access the list of errors:

```go
if err := something(); err != nil {
	if merr, ok := err.(*multierror.Error); ok {
		// Use merr.Errors
	}
}
```

**Returning a multierror only if there are errors**

If you build a `multierror.Error`, you can use the `ErrorOrNil` function
to return an `error` implementation only if there are errors to return:

```go
var result *multierror.Error

// ... accumulate errors here

// Return the `error` only if errors were added to the multierror, otherwise
// return nil since there are no errors.
return result.ErrorOrNil()
```

37 vendor/github.com/hashicorp/go-multierror/append.go generated vendored Normal file
@@ -0,0 +1,37 @@
package multierror

// Append is a helper function that will append more errors
// onto an Error in order to create a larger multi-error.
//
// If err is not a multierror.Error, then it will be turned into
// one. If any of the errs are multierror.Error, they will be flattened
// one level into err.
func Append(err error, errs ...error) *Error {
	switch err := err.(type) {
	case *Error:
		// Typed nils can reach here, so initialize if we are nil
		if err == nil {
			err = new(Error)
		}

		// Go through each error and flatten
		for _, e := range errs {
			switch e := e.(type) {
			case *Error:
				err.Errors = append(err.Errors, e.Errors...)
			default:
				err.Errors = append(err.Errors, e)
			}
		}

		return err
	default:
		newErrs := make([]error, 0, len(errs)+1)
		if err != nil {
			newErrs = append(newErrs, err)
		}
		newErrs = append(newErrs, errs...)

		return Append(&Error{}, newErrs...)
	}
}
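
One behavior worth spelling out is the typed-nil handling in the `case *Error` branch above: a nil `*multierror.Error` can be passed to `Append` directly. A minimal sketch (assuming `errors` and `fmt` are imported):

```go
var merr *multierror.Error

// merr is a typed nil here; Append initializes it instead of panicking.
merr = multierror.Append(merr, errors.New("first failure"))
merr = multierror.Append(merr, errors.New("second failure"))

fmt.Println(len(merr.Errors)) // 2
```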

26 vendor/github.com/hashicorp/go-multierror/flatten.go generated vendored Normal file
@@ -0,0 +1,26 @@
package multierror

// Flatten flattens the given error, merging any *Errors together into
// a single *Error.
func Flatten(err error) error {
	// If it isn't an *Error, just return the error as-is
	if _, ok := err.(*Error); !ok {
		return err
	}

	// Otherwise, make the result and flatten away!
	flatErr := new(Error)
	flatten(err, flatErr)
	return flatErr
}

func flatten(err error, flatErr *Error) {
	switch err := err.(type) {
	case *Error:
		for _, e := range err.Errors {
			flatten(e, flatErr)
		}
	default:
		flatErr.Errors = append(flatErr.Errors, err)
	}
}
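
Since `Flatten` is not covered by the README above, a minimal sketch (with hypothetical errors, assuming `errors` and `fmt` are imported) of how nested lists collapse:

```go
inner := multierror.Append(nil, errors.New("a"), errors.New("b"))
outer := &multierror.Error{Errors: []error{inner, errors.New("c")}}

// outer holds two entries, one of which is itself a *multierror.Error;
// Flatten merges everything into a single flat list.
flat := multierror.Flatten(outer).(*multierror.Error)
fmt.Println(len(flat.Errors)) // 3
```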

23 vendor/github.com/hashicorp/go-multierror/format.go generated vendored Normal file
@@ -0,0 +1,23 @@
package multierror

import (
	"fmt"
	"strings"
)

// ErrorFormatFunc is a function callback that is called by Error to
// turn the list of errors into a string.
type ErrorFormatFunc func([]error) string

// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d error(s) occurred:\n\n%s",
		len(es), strings.Join(points, "\n"))
}

51 vendor/github.com/hashicorp/go-multierror/multierror.go generated vendored Normal file
@@ -0,0 +1,51 @@
package multierror

import (
	"fmt"
)

// Error is an error type to track multiple errors. This is used to
// accumulate errors in cases and return them as a single "error".
type Error struct {
	Errors      []error
	ErrorFormat ErrorFormatFunc
}

func (e *Error) Error() string {
	fn := e.ErrorFormat
	if fn == nil {
		fn = ListFormatFunc
	}

	return fn(e.Errors)
}

// ErrorOrNil returns an error interface if this Error represents
// a list of errors, or returns nil if the list of errors is empty. This
// function is useful at the end of accumulation to make sure that the value
// returned represents the existence of errors.
func (e *Error) ErrorOrNil() error {
	if e == nil {
		return nil
	}
	if len(e.Errors) == 0 {
		return nil
	}

	return e
}

func (e *Error) GoString() string {
	return fmt.Sprintf("*%#v", *e)
}

// WrappedErrors returns the list of errors that this Error is wrapping.
// It is an implementation of the errwrap.Wrapper interface so that
// multierror.Error can be used with that library.
//
// This method is not safe to be called concurrently and is no different
// than accessing the Errors field directly. It is implemented only to
// satisfy the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
	return e.Errors
}

37 vendor/github.com/hashicorp/go-multierror/prefix.go generated vendored Normal file
@@ -0,0 +1,37 @@
package multierror

import (
	"fmt"

	"github.com/hashicorp/errwrap"
)

// Prefix is a helper function that will prefix some text
// to the given error. If the error is a multierror.Error, then
// it will be prefixed to each wrapped error.
//
// This is useful to use when appending multiple multierrors
// together in order to give better scoping.
func Prefix(err error, prefix string) error {
	if err == nil {
		return nil
	}

	format := fmt.Sprintf("%s {{err}}", prefix)
	switch err := err.(type) {
	case *Error:
		// Typed nils can reach here, so initialize if we are nil
		if err == nil {
			err = new(Error)
		}

		// Wrap each of the errors
		for i, e := range err.Errors {
			err.Errors[i] = errwrap.Wrapf(format, e)
		}

		return err
	default:
		return errwrap.Wrapf(format, err)
	}
}
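
A minimal sketch of how `Prefix` is typically combined with `Append` for scoping (the `loadConfig` and `connectDB` steps are hypothetical):

```go
var result *multierror.Error

if err := loadConfig(); err != nil {
	// Every error from this step now reads "config: <original message>".
	result = multierror.Append(result, multierror.Prefix(err, "config:"))
}
if err := connectDB(); err != nil {
	result = multierror.Append(result, multierror.Prefix(err, "db:"))
}

return result.ErrorOrNil()
```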

12 vendor/github.com/hashicorp/go-rootcerts/.travis.yml generated vendored Normal file
@@ -0,0 +1,12 @@
sudo: false

language: go

go:
  - 1.6

branches:
  only:
    - master

script: make test

363 vendor/github.com/hashicorp/go-rootcerts/LICENSE generated vendored Normal file
@@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
8 vendor/github.com/hashicorp/go-rootcerts/Makefile generated vendored Normal file
@@ -0,0 +1,8 @@
TEST?=./...

test:
	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
	go vet $(TEST)
	go test $(TEST) -race

.PHONY: test
43 vendor/github.com/hashicorp/go-rootcerts/README.md generated vendored Normal file
@@ -0,0 +1,43 @@
# rootcerts

Functions for loading root certificates for TLS connections.

-----

Go's standard library `crypto/tls` provides a common mechanism for configuring
TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
of certificates for the client to use as a trust store when verifying server
certificates.

This library contains utility functions for loading certificates destined for
that field, as well as one other important thing:

When the `RootCAs` field is `nil`, the standard library attempts to load the
host's root CA set. This behavior is OS-specific, and the Darwin
implementation contains [a bug that prevents trusted certificates from the
System and Login keychains from being loaded][1]. This library contains
Darwin-specific behavior that works around that bug.

[1]: https://github.com/golang/go/issues/14514

## Example Usage

Here's a snippet demonstrating how this library is meant to be used:

```go
func httpClient() (*http.Client, error) {
	tlsConfig := &tls.Config{}
	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: os.Getenv("MYAPP_CAFILE"),
		CAPath: os.Getenv("MYAPP_CAPATH"),
	})
	if err != nil {
		return nil, err
	}
	c := cleanhttp.DefaultClient()
	t := cleanhttp.DefaultTransport()
	t.TLSClientConfig = tlsConfig
	c.Transport = t
	return c, nil
}
```
9 vendor/github.com/hashicorp/go-rootcerts/doc.go generated vendored Normal file
@@ -0,0 +1,9 @@
// Package rootcerts contains functions to aid in loading CA certificates for
// TLS connections.
//
// In addition, its default behavior on Darwin works around an open issue [1]
// in Go's crypto/x509 that prevents certificates from being loaded from the
// System or Login keychains.
//
// [1] https://github.com/golang/go/issues/14514
package rootcerts
103 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go generated vendored Normal file
@@ -0,0 +1,103 @@
package rootcerts

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// Config determines where LoadCACerts will load certificates from. When both
// CAFile and CAPath are blank, this library's functions will either load
// system roots explicitly and return them, or set the CertPool to nil to allow
// Go's standard library to load system certs.
type Config struct {
	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
	// precedence over CAPath.
	CAFile string

	// CAPath is a path to a directory populated with PEM-encoded certificates.
	CAPath string
}

// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
// Config specified.
func ConfigureTLS(t *tls.Config, c *Config) error {
	if t == nil {
		return nil
	}
	pool, err := LoadCACerts(c)
	if err != nil {
		return err
	}
	t.RootCAs = pool
	return nil
}

// LoadCACerts loads a CertPool based on the Config specified.
func LoadCACerts(c *Config) (*x509.CertPool, error) {
	if c == nil {
		c = &Config{}
	}
	if c.CAFile != "" {
		return LoadCAFile(c.CAFile)
	}
	if c.CAPath != "" {
		return LoadCAPath(c.CAPath)
	}

	return LoadSystemCAs()
}

// LoadCAFile loads a single PEM-encoded file from the path specified.
func LoadCAFile(caFile string) (*x509.CertPool, error) {
	pool := x509.NewCertPool()

	pem, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, fmt.Errorf("Error loading CA File: %s", err)
	}

	ok := pool.AppendCertsFromPEM(pem)
	if !ok {
		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
	}

	return pool, nil
}

// LoadCAPath walks the provided path and loads all certificates encountered
// into a pool.
func LoadCAPath(caPath string) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	walkFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		pem, err := ioutil.ReadFile(path)
		if err != nil {
			return fmt.Errorf("Error loading file from CAPath: %s", err)
		}

		ok := pool.AppendCertsFromPEM(pem)
		if !ok {
			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
		}

		return nil
	}

	err := filepath.Walk(caPath, walkFn)
	if err != nil {
		return nil, err
	}

	return pool, nil
}
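To show how the API added above fits together, here is a minimal, hypothetical usage sketch (not part of the vendored package); the CA bundle path is a placeholder:

```go
package main

import (
	"crypto/tls"
	"log"

	rootcerts "github.com/hashicorp/go-rootcerts"
)

func main() {
	tlsConfig := &tls.Config{}

	// CAFile takes precedence over CAPath; with an empty Config,
	// LoadCACerts falls back to LoadSystemCAs instead.
	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: "/etc/myapp/ca-bundle.pem", // hypothetical bundle path
	})
	if err != nil {
		log.Fatal(err)
	}

	// tlsConfig.RootCAs now holds the pool returned by LoadCACerts, or nil
	// so the standard library loads the host's roots.
	log.Printf("RootCAs configured: %v", tlsConfig.RootCAs != nil)
}
```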
12 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go generated vendored Normal file
@@ -0,0 +1,12 @@
// +build !darwin

package rootcerts

import "crypto/x509"

// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
// default behavior of standard TLS config libraries is triggered, which is to
// load system certs.
func LoadSystemCAs() (*x509.CertPool, error) {
	return nil, nil
}
48 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go generated vendored Normal file
@@ -0,0 +1,48 @@
package rootcerts

import (
	"crypto/x509"
	"os/exec"
	"path"

	"github.com/mitchellh/go-homedir"
)

// LoadSystemCAs has special behavior on Darwin systems to work around the
// crypto/x509 issue that keeps certificates in the System and login keychains
// from being loaded: it reads the keychains directly via /usr/bin/security.
func LoadSystemCAs() (*x509.CertPool, error) {
	pool := x509.NewCertPool()

	for _, keychain := range certKeychains() {
		err := addCertsFromKeychain(pool, keychain)
		if err != nil {
			return nil, err
		}
	}

	return pool, nil
}

func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
	data, err := cmd.Output()
	if err != nil {
		return err
	}

	pool.AppendCertsFromPEM(data)

	return nil
}

func certKeychains() []string {
	keychains := []string{
		"/System/Library/Keychains/SystemRootCertificates.keychain",
		"/Library/Keychains/System.keychain",
	}
	home, err := homedir.Dir()
	if err == nil {
		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
		keychains = append(keychains, loginKeychain)
	}
	return keychains
}
9 vendor/github.com/hashicorp/hcl/.gitignore generated vendored Normal file
@@ -0,0 +1,9 @@
y.output

# ignore intellij files
.idea
*.iml
*.ipr
*.iws

*.test
3 vendor/github.com/hashicorp/hcl/.travis.yml generated vendored Normal file
@@ -0,0 +1,3 @@
sudo: false
language: go
go: 1.7
354 vendor/github.com/hashicorp/hcl/LICENSE generated vendored Normal file
@@ -0,0 +1,354 @@
|
|||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
18 vendor/github.com/hashicorp/hcl/Makefile generated vendored Normal file
@@ -0,0 +1,18 @@
TEST?=./...

default: test

fmt: generate
	go fmt ./...

test: generate
	go get -t ./...
	go test $(TEST) $(TESTARGS)

generate:
	go generate ./...

updatedeps:
	go get -u golang.org/x/tools/cmd/stringer

.PHONY: default generate test updatedeps
125 vendor/github.com/hashicorp/hcl/README.md generated vendored Normal file
@@ -0,0 +1,125 @@
|
|||
# HCL
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
|
||||
|
||||
HCL (HashiCorp Configuration Language) is a configuration language built
|
||||
by HashiCorp. The goal of HCL is to build a structured configuration language
|
||||
that is both human and machine friendly for use with command-line tools, but
|
||||
specifically targeted towards DevOps tools, servers, etc.
|
||||
|
||||
HCL is also fully JSON compatible. That is, JSON can be used as completely
|
||||
valid input to a system expecting HCL. This helps make systems
|
||||
interoperable with other systems.
|
||||
|
||||
HCL is heavily inspired by
|
||||
[libucl](https://github.com/vstakhov/libucl),
|
||||
nginx configuration, and other similar formats.
|
||||
|
||||
## Why?
|
||||
|
||||
A common question when viewing HCL is to ask the question: why not
|
||||
JSON, YAML, etc.?
|
||||
|
||||
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
|
||||
used a variety of configuration languages from full programming languages
|
||||
such as Ruby to complete data structure languages such as JSON. What we
|
||||
learned is that some people wanted human-friendly configuration languages
|
||||
and some people wanted machine-friendly languages.
|
||||
|
||||
JSON fits a nice balance in this, but is fairly verbose and most
|
||||
importantly doesn't support comments. With YAML, we found that beginners
|
||||
had a really hard time determining what the actual structure was, and
|
||||
ended up guessing more often than not whether to use a hyphen, colon, etc.
|
||||
in order to represent some configuration key.
|
||||
|
||||
Full programming languages such as Ruby enable complex behavior
|
||||
a configuration language shouldn't usually allow, and also force
|
||||
people to learn some set of Ruby.
|
||||
|
||||
Because of this, we decided to create our own configuration language
|
||||
that is JSON-compatible. Our configuration language (HCL) is designed
|
||||
to be written and modified by humans. The API for HCL allows JSON
|
||||
as an input so that it is also machine-friendly (machines can generate
|
||||
JSON instead of trying to generate HCL).
|
||||
|
||||
Our goal with HCL is not to alienate other configuration languages.
|
||||
It is instead to provide HCL as a specialized language for our tools,
|
||||
and JSON as the interoperability layer.
|
||||
|
||||
## Syntax
|
||||
|
||||
For a complete grammar, please see the parser itself. A high-level overview
|
||||
of the syntax and grammar is listed here.
|
||||
|
||||
* Single line comments start with `#` or `//`
|
||||
|
||||
* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
|
||||
are not allowed. A multi-line comment (also known as a block comment)
|
||||
terminates at the first `*/` found.
|
||||
|
||||
* Values are assigned with the syntax `key = value` (whitespace doesn't
|
||||
matter). The value can be any primitive: a string, number, boolean,
|
||||
object, or list.
|
||||
|
||||
* Strings are double-quoted and can contain any UTF-8 characters.
|
||||
Example: `"Hello, World"`
|
||||
|
||||
* Multi-line strings start with `<<EOF` at the end of a line, and end
|
||||
with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
|
||||
Any text may be used in place of `EOF`. Example:
|
||||
```
|
||||
<<FOO
|
||||
hello
|
||||
world
|
||||
FOO
|
||||
```
|
||||
|
||||
* Numbers are assumed to be base 10. If you prefix a number with 0x,
|
||||
it is treated as a hexadecimal. If it is prefixed with 0, it is
|
||||
treated as an octal. Numbers can be in scientific notation: "1e10".
|
||||
|
||||
* Boolean values: `true`, `false`
|
||||
|
||||
* Arrays can be made by wrapping values in `[]`. Example:
|
||||
`["foo", "bar", 42]`. Arrays can contain primitives,
|
||||
other arrays, and objects. As an alternative, lists
|
||||
of objects can be created with repeated blocks, using
|
||||
this structure:
|
||||
|
||||
```hcl
|
||||
service {
|
||||
key = "value"
|
||||
}
|
||||
|
||||
service {
|
||||
key = "value"
|
||||
}
|
||||
```
|
||||
|
||||
Objects and nested objects are created using the structure shown below:
|
||||
|
||||
```
|
||||
variable "ami" {
|
||||
description = "the AMI to use"
|
||||
}
|
||||
```
|
||||
This would be equivalent to the following JSON:
|
||||
``` json
|
||||
{
|
||||
"variable": {
|
||||
"ami": {
|
||||
"description": "the AMI to use"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Thanks
|
||||
|
||||
Thanks to:
|
||||
|
||||
* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
|
||||
and syntax that HCL was based off of.
|
||||
|
||||
* [@fatih](https://github.com/fatih) - The rewritten HCL parser
|
||||
in pure Go (no goyacc) and support for a printer.
|
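The hcl decoder vendored further below exposes `Decode` for turning input like the `variable "ami"` example above into Go values. The following is a minimal sketch under the assumption that labeled blocks map onto a map-typed struct field via `hcl` tags; the type names are illustrative and not taken from this commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Variable mirrors the README's `variable "ami"` block; the struct layout
// and tags here are illustrative, not part of the vendored code.
type Variable struct {
	Description string `hcl:"description"`
}

type Config struct {
	Variable map[string]Variable `hcl:"variable"`
}

func main() {
	input := `
variable "ami" {
  description = "the AMI to use"
}
`
	var c Config
	if err := hcl.Decode(&c, input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Variable["ami"].Description) // prints: the AMI to use
}
```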
19 vendor/github.com/hashicorp/hcl/appveyor.yml generated vendored Normal file
@@ -0,0 +1,19 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
  GOPATH: c:\gopath
init:
  - git config --global core.autocrlf true
install:
  - cmd: >-
      echo %Path%

      go version

      go env

      go get -t ./...

build_script:
  - cmd: go test -v ./...
716 vendor/github.com/hashicorp/hcl/decoder.go generated vendored Normal file
@@ -0,0 +1,716 @@
|
|||
package hcl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/parser"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// This is the tag to use with structures to have settings for HCL
|
||||
const tagName = "hcl"
|
||||
|
||||
var (
|
||||
// nodeType holds a reference to the type of ast.Node
|
||||
nodeType reflect.Type = findNodeType()
|
||||
)
|
||||
|
||||
// Unmarshal accepts a byte slice as input and writes the
|
||||
// data to the value pointed to by v.
|
||||
func Unmarshal(bs []byte, v interface{}) error {
|
||||
root, err := parse(bs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(v, root)
|
||||
}
|
||||
|
||||
// Decode reads the given input and decodes it into the structure
|
||||
// given by `out`.
|
||||
func Decode(out interface{}, in string) error {
|
||||
obj, err := Parse(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(out, obj)
|
||||
}
|
||||
|
||||
// DecodeObject is a lower-level version of Decode. It decodes a
|
||||
// raw Object into the given output.
|
||||
func DecodeObject(out interface{}, n ast.Node) error {
|
||||
val := reflect.ValueOf(out)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return errors.New("result must be a pointer")
|
||||
}
|
||||
|
||||
// If we have the file, we really decode the root node
|
||||
if f, ok := n.(*ast.File); ok {
|
||||
n = f.Node
|
||||
}
|
||||
|
||||
var d decoder
|
||||
return d.decode("root", n, val.Elem())
|
||||
}
|
||||
|
||||
type decoder struct {
|
||||
stack []reflect.Kind
|
||||
}
|
||||
|
||||
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
|
||||
k := result
|
||||
|
||||
// If we have an interface with a valid value, we use that
|
||||
// for the check.
|
||||
if result.Kind() == reflect.Interface {
|
||||
elem := result.Elem()
|
||||
if elem.IsValid() {
|
||||
k = elem
|
||||
}
|
||||
}
|
||||
|
||||
// Push current onto stack unless it is an interface.
|
||||
if k.Kind() != reflect.Interface {
|
||||
d.stack = append(d.stack, k.Kind())
|
||||
|
||||
// Schedule a pop
|
||||
defer func() {
|
||||
d.stack = d.stack[:len(d.stack)-1]
|
||||
}()
|
||||
}
|
||||
|
||||
switch k.Kind() {
|
||||
case reflect.Bool:
|
||||
return d.decodeBool(name, node, result)
|
||||
case reflect.Float64:
|
||||
return d.decodeFloat(name, node, result)
|
||||
case reflect.Int:
|
||||
return d.decodeInt(name, node, result)
|
||||
case reflect.Interface:
|
||||
// When we see an interface, we make our own thing
|
||||
return d.decodeInterface(name, node, result)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(name, node, result)
|
||||
case reflect.Ptr:
|
||||
return d.decodePtr(name, node, result)
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(name, node, result)
|
||||
case reflect.String:
|
||||
return d.decodeString(name, node, result)
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(name, node, result)
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.BOOL {
|
||||
v, err := strconv.ParseBool(n.Token.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.FLOAT {
|
||||
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
return nil
|
||||
case token.STRING:
|
||||
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
|
||||
// When we see an ast.Node, we retain the value to enable deferred decoding.
|
||||
// Very useful in situations where we want to preserve ast.Node information
|
||||
// like Pos
|
||||
if result.Type() == nodeType && result.CanSet() {
|
||||
result.Set(reflect.ValueOf(node))
|
||||
return nil
|
||||
}
|
||||
|
||||
var set reflect.Value
|
||||
redecode := true
|
||||
|
||||
// For testing types, ObjectType should just be treated as a list. We
|
||||
// set this to a temporary var because we want to pass in the real node.
|
||||
testNode := node
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
testNode = ot.List
|
||||
}
|
||||
|
||||
switch n := testNode.(type) {
|
||||
case *ast.ObjectList:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
|
||||
set = result
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
|
||||
set = result
|
||||
}
|
||||
case *ast.ListType:
|
||||
var temp []interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
|
||||
set = result
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.BOOL:
|
||||
var result bool
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.FLOAT:
|
||||
var result float64
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.NUMBER:
|
||||
var result int
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.STRING, token.HEREDOC:
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"%s: cannot decode into interface: %T",
|
||||
name, node)
|
||||
}
|
||||
|
||||
// Set the result to what it's supposed to be, then reset
|
||||
// result so we don't reflect into this method anymore.
|
||||
result.Set(set)
|
||||
|
||||
if redecode {
|
||||
// Revisit the node so that we can use the newly instantiated
|
||||
// thing and populate it.
|
||||
if err := d.decode(name, node, result); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
|
||||
if item, ok := node.(*ast.ObjectItem); ok {
|
||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||
}
|
||||
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
node = ot.List
|
||||
}
|
||||
|
||||
n, ok := node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// If we have an interface, then we can address the interface,
|
||||
// but not the slice itself, so get the element but set the interface
|
||||
set := result
|
||||
if result.Kind() == reflect.Interface {
|
||||
result = result.Elem()
|
||||
}
|
||||
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
resultKeyType := resultType.Key()
|
||||
if resultKeyType.Kind() != reflect.String {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Make a map if it is nil
|
||||
resultMap := result
|
||||
if result.IsNil() {
|
||||
resultMap = reflect.MakeMap(
|
||||
reflect.MapOf(resultKeyType, resultElemType))
|
||||
}
|
||||
|
||||
// Go through each element and decode it.
|
||||
done := make(map[string]struct{})
|
||||
for _, item := range n.Items {
|
||||
if item.Val == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// github.com/hashicorp/terraform/issue/5740
|
||||
if len(item.Keys) == 0 {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Get the key we're dealing with, which is the first item
|
||||
keyStr := item.Keys[0].Token.Value().(string)
|
||||
|
||||
// If we've already processed this key, then ignore it
|
||||
if _, ok := done[keyStr]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Determine the value. If we have more than one key, then we
|
||||
// get the objectlist of only these keys.
|
||||
itemVal := item.Val
|
||||
if len(item.Keys) > 1 {
|
||||
itemVal = n.Filter(keyStr)
|
||||
done[keyStr] = struct{}{}
|
||||
}
|
||||
|
||||
// Make the field name
|
||||
fieldName := fmt.Sprintf("%s.%s", name, keyStr)
|
||||
|
||||
// Get the key/value as reflection values
|
||||
key := reflect.ValueOf(keyStr)
|
||||
val := reflect.Indirect(reflect.New(resultElemType))
|
||||
|
||||
// If we have a pre-existing value in the map, use that
|
||||
oldVal := resultMap.MapIndex(key)
|
||||
if oldVal.IsValid() {
|
||||
val.Set(oldVal)
|
||||
}
|
||||
|
||||
// Decode!
|
||||
if err := d.decode(fieldName, itemVal, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the value on the map
|
||||
resultMap.SetMapIndex(key, val)
|
||||
}
|
||||
|
||||
// Set the final map if we can
|
||||
set.Set(resultMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
val := reflect.New(resultElemType)
|
||||
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
|
||||
// If we have an interface, then we can address the interface,
|
||||
// but not the slice itself, so get the element but set the interface
|
||||
set := result
|
||||
if result.Kind() == reflect.Interface {
|
||||
result = result.Elem()
|
||||
}
|
||||
// Create the slice if it is nil
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
if result.IsNil() {
|
||||
resultSliceType := reflect.SliceOf(resultElemType)
|
||||
result = reflect.MakeSlice(
|
||||
resultSliceType, 0, 0)
|
||||
}
|
||||
|
||||
// Figure out the items we'll be copying into the slice
|
||||
var items []ast.Node
|
||||
switch n := node.(type) {
|
||||
case *ast.ObjectList:
|
||||
items = make([]ast.Node, len(n.Items))
|
||||
for i, item := range n.Items {
|
||||
items[i] = item
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
items = []ast.Node{n}
|
||||
case *ast.ListType:
|
||||
items = n.List
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("unknown slice type: %T", node),
|
||||
}
|
||||
}
|
||||
|
||||
for i, item := range items {
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
|
||||
// Decode
|
||||
val := reflect.Indirect(reflect.New(resultElemType))
|
||||
|
||||
// if item is an object that was decoded from ambiguous JSON and
|
||||
// flattened, make sure it's expanded if it needs to decode into a
|
||||
// defined structure.
|
||||
item := expandObject(item, val)
|
||||
|
||||
if err := d.decode(fieldName, item, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append it onto the slice
|
||||
result = reflect.Append(result, val)
|
||||
}
|
||||
|
||||
set.Set(result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// expandObject detects if an ambiguous JSON object was flattened to a List which
|
||||
// should be decoded into a struct, and expands the ast to properly decode.
|
||||
func expandObject(node ast.Node, result reflect.Value) ast.Node {
|
||||
item, ok := node.(*ast.ObjectItem)
|
||||
if !ok {
|
||||
return node
|
||||
}
|
||||
|
||||
elemType := result.Type()
|
||||
|
||||
// our target type must be a struct
|
||||
switch elemType.Kind() {
|
||||
case reflect.Ptr:
|
||||
switch elemType.Elem().Kind() {
|
||||
case reflect.Struct:
|
||||
//OK
|
||||
default:
|
||||
return node
|
||||
}
|
||||
case reflect.Struct:
|
||||
//OK
|
||||
default:
|
||||
return node
|
||||
}
|
||||
|
||||
// A list value will have a key and field name. If it had more fields,
|
||||
// it wouldn't have been flattened.
|
||||
if len(item.Keys) != 2 {
|
||||
return node
|
||||
}
|
||||
|
||||
keyToken := item.Keys[0].Token
|
||||
item.Keys = item.Keys[1:]
|
||||
|
||||
// we need to un-flatten the ast enough to decode
|
||||
newNode := &ast.ObjectItem{
|
||||
Keys: []*ast.ObjectKey{
|
||||
&ast.ObjectKey{
|
||||
Token: keyToken,
|
||||
},
|
||||
},
|
||||
Val: &ast.ObjectType{
|
||||
List: &ast.ObjectList{
|
||||
Items: []*ast.ObjectItem{item},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return newNode
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
|
||||
return nil
|
||||
case token.STRING, token.HEREDOC:
|
||||
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
|
||||
var item *ast.ObjectItem
|
||||
if it, ok := node.(*ast.ObjectItem); ok {
|
||||
item = it
|
||||
node = it.Val
|
||||
}
|
||||
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
node = ot.List
|
||||
}
|
||||
|
||||
// Handle the special case where the object itself is a literal. Previously
|
||||
// the yacc parser would always ensure top-level elements were arrays. The new
|
||||
// parser does not make the same guarantees, thus we need to convert any
|
||||
// top-level literal elements into a list.
|
||||
if _, ok := node.(*ast.LiteralType); ok && item != nil {
|
||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||
}
|
||||
|
||||
list, ok := node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// This slice will keep track of all the structs we'll be decoding.
|
||||
// There can be more than one struct if there are embedded structs
|
||||
// that are squashed.
|
||||
structs := make([]reflect.Value, 1, 5)
|
||||
structs[0] = result
|
||||
|
||||
// Compile the list of all the fields that we're going to be decoding
|
||||
// from all the structs.
|
||||
fields := make(map[*reflect.StructField]reflect.Value)
|
||||
for len(structs) > 0 {
|
||||
structVal := structs[0]
|
||||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
|
||||
|
||||
// Ignore fields with tag name "-"
|
||||
if tagParts[0] == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
if fieldType.Anonymous {
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
if fieldKind != reflect.Struct {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unsupported type to struct: %s",
|
||||
fieldType.Name, fieldKind),
|
||||
}
|
||||
}
|
||||
|
||||
// We have an embedded field. We "squash" the fields down
|
||||
// if specified in the tag.
|
||||
squash := false
|
||||
for _, tag := range tagParts[1:] {
|
||||
if tag == "squash" {
|
||||
squash = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if squash {
|
||||
structs = append(
|
||||
structs, result.FieldByName(fieldType.Name))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Normal struct field, store it away
|
||||
fields[&fieldType] = structVal.Field(i)
|
||||
}
|
||||
}
|
||||
|
||||
usedKeys := make(map[string]struct{})
|
||||
decodedFields := make([]string, 0, len(fields))
|
||||
decodedFieldsVal := make([]reflect.Value, 0)
|
||||
unusedKeysVal := make([]reflect.Value, 0)
|
||||
for fieldType, field := range fields {
|
||||
if !field.IsValid() {
|
||||
// This should never happen
|
||||
panic("field is not valid")
|
||||
}
|
||||
|
||||
// If we can't set the field, then it is unexported or something,
|
||||
// and we just continue onwards.
|
||||
if !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := fieldType.Name
|
||||
|
||||
tagValue := fieldType.Tag.Get(tagName)
|
||||
tagParts := strings.SplitN(tagValue, ",", 2)
|
||||
if len(tagParts) >= 2 {
|
||||
switch tagParts[1] {
|
||||
case "decodedFields":
|
||||
decodedFieldsVal = append(decodedFieldsVal, field)
|
||||
continue
|
||||
case "key":
|
||||
if item == nil {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: %s asked for 'key', impossible",
|
||||
name, fieldName),
|
||||
}
|
||||
}
|
||||
|
||||
field.SetString(item.Keys[0].Token.Value().(string))
|
||||
continue
|
||||
case "unusedKeys":
|
||||
unusedKeysVal = append(unusedKeysVal, field)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tagParts[0] != "" {
|
||||
fieldName = tagParts[0]
|
||||
}
|
||||
|
||||
// Determine the element we'll use to decode. If it is a single
|
||||
// match (only object with the field), then we decode it exactly.
|
||||
// If it is a prefix match, then we decode the matches.
|
||||
filter := list.Filter(fieldName)
|
||||
|
||||
prefixMatches := filter.Children()
|
||||
matches := filter.Elem()
|
||||
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Track the used key
|
||||
usedKeys[fieldName] = struct{}{}
|
||||
|
||||
// Create the field name and decode. We range over the elements
|
||||
// because we actually want the value.
|
||||
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||
if len(prefixMatches.Items) > 0 {
|
||||
if err := d.decode(fieldName, prefixMatches, field); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, match := range matches.Items {
|
||||
var decodeNode ast.Node = match.Val
|
||||
if ot, ok := decodeNode.(*ast.ObjectType); ok {
|
||||
decodeNode = &ast.ObjectList{Items: ot.List.Items}
|
||||
}
|
||||
|
||||
if err := d.decode(fieldName, decodeNode, field); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
decodedFields = append(decodedFields, fieldType.Name)
|
||||
}
|
||||
|
||||
if len(decodedFieldsVal) > 0 {
|
||||
// Sort it so that it is deterministic
|
||||
sort.Strings(decodedFields)
|
||||
|
||||
for _, v := range decodedFieldsVal {
|
||||
v.Set(reflect.ValueOf(decodedFields))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findNodeType returns the type of ast.Node
|
||||
func findNodeType() reflect.Type {
|
||||
var nodeContainer struct {
|
||||
Node ast.Node
|
||||
}
|
||||
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
|
||||
return value.Type()
|
||||
}
|
11 vendor/github.com/hashicorp/hcl/hcl.go generated vendored Normal file
@@ -0,0 +1,11 @@
// Package hcl decodes HCL into usable Go structures.
|
||||
//
|
||||
// hcl input can come in either pure HCL format or JSON format.
|
||||
// It can be parsed into an AST, and then decoded into a structure,
|
||||
// or it can be decoded directly from a string into a structure.
|
||||
//
|
||||
// If you choose to parse HCL into a raw AST, the benefit is that you
|
||||
// can write custom visitor implementations to implement custom
|
||||
// semantic checks. By default, HCL does not perform any semantic
|
||||
// checks.
|
||||
package hcl
|
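As a point of reference for the package documentation above, here is a minimal sketch of decoding HCL text directly into a Go structure with hcl.Decode; the Config struct, its field names, and the input snippet are hypothetical and are not part of the vendored sources.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target structure; the hcl tags map HCL keys to fields.
type Config struct {
	Name    string `hcl:"name"`
	Workers int    `hcl:"workers"`
}

func main() {
	src := `
name    = "korvike"
workers = 4
`
	var cfg Config
	// Decode parses the input (HCL or JSON) and populates cfg.
	if err := hcl.Decode(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Name, cfg.Workers)
}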
218 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go generated vendored Normal file
@@ -0,0 +1,218 @@
// Package ast declares the types used to represent syntax trees for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Node is an element in the abstract syntax tree.
|
||||
type Node interface {
|
||||
node()
|
||||
Pos() token.Pos
|
||||
}
|
||||
|
||||
func (File) node() {}
|
||||
func (ObjectList) node() {}
|
||||
func (ObjectKey) node() {}
|
||||
func (ObjectItem) node() {}
|
||||
func (Comment) node() {}
|
||||
func (CommentGroup) node() {}
|
||||
func (ObjectType) node() {}
|
||||
func (LiteralType) node() {}
|
||||
func (ListType) node() {}
|
||||
|
||||
// File represents a single HCL file
|
||||
type File struct {
|
||||
Node Node // usually a *ObjectList
|
||||
Comments []*CommentGroup // list of all comments in the source
|
||||
}
|
||||
|
||||
func (f *File) Pos() token.Pos {
|
||||
return f.Node.Pos()
|
||||
}
|
||||
|
||||
// ObjectList represents a list of ObjectItems. An HCL file itself is an
|
||||
// ObjectList.
|
||||
type ObjectList struct {
|
||||
Items []*ObjectItem
|
||||
}
|
||||
|
||||
func (o *ObjectList) Add(item *ObjectItem) {
|
||||
o.Items = append(o.Items, item)
|
||||
}
|
||||
|
||||
// Filter returns the objects whose keys have the given key list as a prefix.
|
||||
//
|
||||
// The returned list of objects contain ObjectItems where the keys have
|
||||
// this prefix already stripped off. This might result in objects with
|
||||
// zero-length key lists if they have no children.
|
||||
//
|
||||
// If no matches are found, an empty ObjectList (non-nil) is returned.
|
||||
func (o *ObjectList) Filter(keys ...string) *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
// If there aren't enough keys, then ignore this
|
||||
if len(item.Keys) < len(keys) {
|
||||
continue
|
||||
}
|
||||
|
||||
match := true
|
||||
for i, key := range item.Keys[:len(keys)] {
|
||||
key := key.Token.Value().(string)
|
||||
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
|
||||
// Strip off the prefix from the children
|
||||
newItem := *item
|
||||
newItem.Keys = newItem.Keys[len(keys):]
|
||||
result.Add(&newItem)
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
// Children returns further nested objects (key length > 0) within this
|
||||
// ObjectList. This should be used with Filter to get at child items.
|
||||
func (o *ObjectList) Children() *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
if len(item.Keys) > 0 {
|
||||
result.Add(item)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
// Elem returns items in the list that are direct element assignments
|
||||
// (key length == 0). This should be used with Filter to get at elements.
|
||||
func (o *ObjectList) Elem() *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
if len(item.Keys) == 0 {
|
||||
result.Add(item)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
func (o *ObjectList) Pos() token.Pos {
|
||||
// returns the position of the first item in the list
|
||||
return o.Items[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectItem represents a HCL Object Item. An item is represented with a key
|
||||
// (or keys). It can be an assignment or an object (both normal and nested)
|
||||
type ObjectItem struct {
|
||||
// keys is only one length long if it's of type assignment. If it's a
|
||||
// nested object it can be larger than one. In that case "assign" is
|
||||
// invalid as there is no assignments for a nested object.
|
||||
Keys []*ObjectKey
|
||||
|
||||
// assign contains the position of "=", if any
|
||||
Assign token.Pos
|
||||
|
||||
// val is the item itself. It can be an object, list, number, bool or a
|
||||
// string. If key length is larger than one, val can be only of type
|
||||
// Object.
|
||||
Val Node
|
||||
|
||||
LeadComment *CommentGroup // associated lead comment
|
||||
LineComment *CommentGroup // associated line comment
|
||||
}
|
||||
|
||||
func (o *ObjectItem) Pos() token.Pos {
|
||||
// I'm not entirely sure what causes this, but removing this causes
|
||||
// a test failure. We should investigate at some point.
|
||||
if len(o.Keys) == 0 {
|
||||
return token.Pos{}
|
||||
}
|
||||
|
||||
return o.Keys[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectKey is either an identifier or a string.
|
||||
type ObjectKey struct {
|
||||
Token token.Token
|
||||
}
|
||||
|
||||
func (o *ObjectKey) Pos() token.Pos {
|
||||
return o.Token.Pos
|
||||
}
|
||||
|
||||
// LiteralType represents a literal of basic type. Valid types are:
|
||||
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
|
||||
type LiteralType struct {
|
||||
Token token.Token
|
||||
|
||||
// associated line comment, only when used in a list
|
||||
LineComment *CommentGroup
|
||||
}
|
||||
|
||||
func (l *LiteralType) Pos() token.Pos {
|
||||
return l.Token.Pos
|
||||
}
|
||||
|
||||
// ListType represents an HCL list type
|
||||
type ListType struct {
|
||||
Lbrack token.Pos // position of "["
|
||||
Rbrack token.Pos // position of "]"
|
||||
List []Node // the elements in lexical order
|
||||
}
|
||||
|
||||
func (l *ListType) Pos() token.Pos {
|
||||
return l.Lbrack
|
||||
}
|
||||
|
||||
func (l *ListType) Add(node Node) {
|
||||
l.List = append(l.List, node)
|
||||
}
|
||||
|
||||
// ObjectType represents a HCL Object Type
|
||||
type ObjectType struct {
|
||||
Lbrace token.Pos // position of "{"
|
||||
Rbrace token.Pos // position of "}"
|
||||
List *ObjectList // the nodes in lexical order
|
||||
}
|
||||
|
||||
func (o *ObjectType) Pos() token.Pos {
|
||||
return o.Lbrace
|
||||
}
|
||||
|
||||
// Comment node represents a single //, # style or /*-style comment
|
||||
type Comment struct {
|
||||
Start token.Pos // position of / or #
|
||||
Text string
|
||||
}
|
||||
|
||||
func (c *Comment) Pos() token.Pos {
|
||||
return c.Start
|
||||
}
|
||||
|
||||
// CommentGroup node represents a sequence of comments with no other tokens and
|
||||
// no empty lines between.
|
||||
type CommentGroup struct {
|
||||
List []*Comment // len(List) > 0
|
||||
}
|
||||
|
||||
func (c *CommentGroup) Pos() token.Pos {
|
||||
return c.List[0].Pos()
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// GoStringer
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
||||
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
52 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go generated vendored Normal file
@@ -0,0 +1,52 @@
package ast
|
||||
|
||||
import "fmt"
|
||||
|
||||
// WalkFunc describes a function to be called for each node during a Walk. The
|
||||
// returned node can be used to rewrite the AST. Walking stops when the returned
|
||||
// bool is false.
|
||||
type WalkFunc func(Node) (Node, bool)
|
||||
|
||||
// Walk traverses an AST in depth-first order: It starts by calling fn(node);
|
||||
// node must not be nil. If fn returns true, Walk invokes fn recursively for
|
||||
// each of the non-nil children of node, followed by a call of fn(nil). The
|
||||
// returned node of fn can be used to rewrite the passed node to fn.
|
||||
func Walk(node Node, fn WalkFunc) Node {
|
||||
rewritten, ok := fn(node)
|
||||
if !ok {
|
||||
return rewritten
|
||||
}
|
||||
|
||||
switch n := node.(type) {
|
||||
case *File:
|
||||
n.Node = Walk(n.Node, fn)
|
||||
case *ObjectList:
|
||||
for i, item := range n.Items {
|
||||
n.Items[i] = Walk(item, fn).(*ObjectItem)
|
||||
}
|
||||
case *ObjectKey:
|
||||
// nothing to do
|
||||
case *ObjectItem:
|
||||
for i, k := range n.Keys {
|
||||
n.Keys[i] = Walk(k, fn).(*ObjectKey)
|
||||
}
|
||||
|
||||
if n.Val != nil {
|
||||
n.Val = Walk(n.Val, fn)
|
||||
}
|
||||
case *LiteralType:
|
||||
// nothing to do
|
||||
case *ListType:
|
||||
for i, l := range n.List {
|
||||
n.List[i] = Walk(l, fn)
|
||||
}
|
||||
case *ObjectType:
|
||||
n.List = Walk(n.List, fn).(*ObjectList)
|
||||
default:
|
||||
// should we panic here?
|
||||
fmt.Printf("unknown type: %T\n", n)
|
||||
}
|
||||
|
||||
fn(nil)
|
||||
return rewritten
|
||||
}
|
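For reference, a minimal sketch of the Walk API above, assuming a hypothetical one-line input; it parses the snippet with the vendored parser and prints every literal node it visits.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte(`name = "korvike"`))
	if err != nil {
		log.Fatal(err)
	}

	// Returning the node unchanged and true keeps the traversal going
	// without rewriting anything.
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		if lit, ok := n.(*ast.LiteralType); ok {
			fmt.Println("literal:", lit.Token.Text)
		}
		return n, true
	})
}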
17 vendor/github.com/hashicorp/hcl/hcl/parser/error.go generated vendored Normal file
@@ -0,0 +1,17 @@
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// PosError is a parse error that contains a position.
|
||||
type PosError struct {
|
||||
Pos token.Pos
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PosError) Error() string {
|
||||
return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
|
||||
}
|
489 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go generated vendored Normal file
@@ -0,0 +1,489 @@
// Package parser implements a parser for HCL (HashiCorp Configuration
|
||||
// Language)
|
||||
package parser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/scanner"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
sc *scanner.Scanner
|
||||
|
||||
// Last read token
|
||||
tok token.Token
|
||||
commaPrev token.Token
|
||||
|
||||
comments []*ast.CommentGroup
|
||||
leadComment *ast.CommentGroup // last lead comment
|
||||
lineComment *ast.CommentGroup // last line comment
|
||||
|
||||
enableTrace bool
|
||||
indent int
|
||||
n int // buffer size (max = 1)
|
||||
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
var errEofToken = errors.New("EOF token found")
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||
func (p *Parser) Parse() (*ast.File, error) {
|
||||
f := &ast.File{}
|
||||
var err, scerr error
|
||||
p.sc.Error = func(pos token.Pos, msg string) {
|
||||
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
|
||||
}
|
||||
|
||||
f.Node, err = p.objectList()
|
||||
if scerr != nil {
|
||||
return nil, scerr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.Comments = p.comments
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (p *Parser) objectList() (*ast.ObjectList, error) {
|
||||
defer un(trace(p, "ParseObjectList"))
|
||||
node := &ast.ObjectList{}
|
||||
|
||||
for {
|
||||
n, err := p.objectItem()
|
||||
if err == errEofToken {
|
||||
break // we are finished
|
||||
}
|
||||
|
||||
// we don't return a nil node, because we might want to use the already
|
||||
// collected items.
|
||||
if err != nil {
|
||||
return node, err
|
||||
}
|
||||
|
||||
node.Add(n)
|
||||
|
||||
// object lists can be optionally comma-delimited e.g. when a list of maps
|
||||
// is being expressed, so a comma is allowed here - it's simply consumed
|
||||
tok := p.scan()
|
||||
if tok.Type != token.COMMA {
|
||||
p.unscan()
|
||||
}
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
// count the endline if it's a multiline comment, i.e. starting with /*
|
||||
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
|
||||
// don't use range here - no need to decode Unicode code points
|
||||
for i := 0; i < len(p.tok.Text); i++ {
|
||||
if p.tok.Text[i] == '\n' {
|
||||
endline++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
|
||||
p.tok = p.sc.Scan()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
|
||||
var list []*ast.Comment
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
|
||||
var comment *ast.Comment
|
||||
comment, endline = p.consumeComment()
|
||||
list = append(list, comment)
|
||||
}
|
||||
|
||||
// add comment group to the comments list
|
||||
comments = &ast.CommentGroup{List: list}
|
||||
p.comments = append(p.comments, comments)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// objectItem parses a single object item
|
||||
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
|
||||
defer un(trace(p, "ParseObjectItem"))
|
||||
|
||||
keys, err := p.objectKey()
|
||||
if len(keys) > 0 && err == errEofToken {
|
||||
// We ignore eof token here since it is an error if we didn't
|
||||
// receive a value (but we did receive a key) for the item.
|
||||
err = nil
|
||||
}
|
||||
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
|
||||
// This is a strange boolean statement, but what it means is:
|
||||
// We have keys with no value, and we're likely in an object
|
||||
// (since RBrace ends an object). For this, we set err to nil so
|
||||
// we continue and get the error below of having the wrong value
|
||||
// type.
|
||||
err = nil
|
||||
|
||||
// Reset the token type so we don't think it completed fine. See
|
||||
// objectType which uses p.tok.Type to check if we're done with
|
||||
// the object.
|
||||
p.tok.Type = token.EOF
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o := &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
}
|
||||
|
||||
if p.leadComment != nil {
|
||||
o.LeadComment = p.leadComment
|
||||
p.leadComment = nil
|
||||
}
|
||||
|
||||
switch p.tok.Type {
|
||||
case token.ASSIGN:
|
||||
o.Assign = p.tok.Pos
|
||||
o.Val, err = p.object()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case token.LBRACE:
|
||||
o.Val, err = p.objectType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
keyStr := make([]string, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
keyStr = append(keyStr, k.Token.Text)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(
|
||||
"key '%s' expected start of object ('{') or assignment ('=')",
|
||||
strings.Join(keyStr, " "))
|
||||
}
|
||||
|
||||
// do a look-ahead for line comment
|
||||
p.scan()
|
||||
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
|
||||
o.LineComment = p.lineComment
|
||||
p.lineComment = nil
|
||||
}
|
||||
p.unscan()
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// objectKey parses an object key and returns a ObjectKey AST
|
||||
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
||||
keyCount := 0
|
||||
keys := make([]*ast.ObjectKey, 0)
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.EOF:
|
||||
// It is very important to also return the keys here as well as
|
||||
// the error. This is because we need to be able to tell if we
|
||||
// did parse keys prior to finding the EOF, or if we just found
|
||||
// a bare EOF.
|
||||
return keys, errEofToken
|
||||
case token.ASSIGN:
|
||||
// assignment or object only, but not nested objects. this is not
|
||||
// allowed: `foo bar = {}`
|
||||
if keyCount > 1 {
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
if keyCount == 0 {
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: errors.New("no object keys found!"),
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
case token.LBRACE:
|
||||
var err error
|
||||
|
||||
// If we have no keys, then it is a syntax error. i.e. {{}} is not
|
||||
// allowed.
|
||||
if len(keys) == 0 {
|
||||
err = &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
// object
|
||||
return keys, err
|
||||
case token.IDENT, token.STRING:
|
||||
keyCount++
|
||||
keys = append(keys, &ast.ObjectKey{Token: p.tok})
|
||||
case token.ILLEGAL:
|
||||
fmt.Println("illegal")
|
||||
default:
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// object parses any type of object, such as number, bool, string, object or
|
||||
// list.
|
||||
func (p *Parser) object() (ast.Node, error) {
|
||||
defer un(trace(p, "ParseType"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
|
||||
return p.literalType()
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.LBRACK:
|
||||
return p.listType()
|
||||
case token.COMMENT:
|
||||
// implement comment
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("Unknown token: %+v", tok),
|
||||
}
|
||||
}
|
||||
|
||||
// objectType parses an object type and returns a ObjectType AST
|
||||
func (p *Parser) objectType() (*ast.ObjectType, error) {
|
||||
defer un(trace(p, "ParseObjectType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACE
|
||||
o := &ast.ObjectType{
|
||||
Lbrace: p.tok.Pos,
|
||||
}
|
||||
|
||||
l, err := p.objectList()
|
||||
|
||||
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
|
||||
// not an RBRACE, it's a syntax error and we just return it.
|
||||
if err != nil && p.tok.Type != token.RBRACE {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If there is no error, we should be at a RBRACE to end the object
|
||||
if p.tok.Type != token.RBRACE {
|
||||
return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
|
||||
}
|
||||
|
||||
o.List = l
|
||||
o.Rbrace = p.tok.Pos // advanced via parseObjectList
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listType parses a list type and returns a ListType AST
|
||||
func (p *Parser) listType() (*ast.ListType, error) {
|
||||
defer un(trace(p, "ParseListType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACK
|
||||
l := &ast.ListType{
|
||||
Lbrack: p.tok.Pos,
|
||||
}
|
||||
|
||||
needComma := false
|
||||
for {
|
||||
tok := p.scan()
|
||||
if needComma {
|
||||
switch tok.Type {
|
||||
case token.COMMA, token.RBRACK:
|
||||
default:
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"error parsing list, expected comma or list end, got: %s",
|
||||
tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
|
||||
node, err := p.literalType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
needComma = true
|
||||
case token.COMMA:
|
||||
// get next list item or we are at the end
|
||||
// do a look-ahead for line comment
|
||||
p.scan()
|
||||
if p.lineComment != nil && len(l.List) > 0 {
|
||||
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
|
||||
if ok {
|
||||
lit.LineComment = p.lineComment
|
||||
l.List[len(l.List)-1] = lit
|
||||
p.lineComment = nil
|
||||
}
|
||||
}
|
||||
p.unscan()
|
||||
|
||||
needComma = false
|
||||
continue
|
||||
case token.LBRACE:
|
||||
// Looks like a nested object, so parse it out
|
||||
node, err := p.objectType()
|
||||
if err != nil {
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"error while trying to parse object within list: %s", err),
|
||||
}
|
||||
}
|
||||
l.Add(node)
|
||||
needComma = true
|
||||
case token.BOOL:
|
||||
// TODO(arslan) should we support? not supported by HCL yet
|
||||
case token.LBRACK:
|
||||
// TODO(arslan) should we support nested lists? Even though it's
|
||||
// written in README of HCL, it's not a part of the grammar
|
||||
// (not defined in parse.y)
|
||||
case token.RBRACK:
|
||||
// finished
|
||||
l.Rbrack = p.tok.Pos
|
||||
return l, nil
|
||||
default:
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// literalType parses a literal type and returns a LiteralType AST
|
||||
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||
defer un(trace(p, "ParseLiteral"))
|
||||
|
||||
return &ast.LiteralType{
|
||||
Token: p.tok,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scan returns the next token from the underlying scanner. If a token has
|
||||
// been unscanned then read that instead. In the process, it collects any
|
||||
// comment groups encountered, and remembers the last lead and line comments.
|
||||
func (p *Parser) scan() token.Token {
|
||||
// If we have a token on the buffer, then return it.
|
||||
if p.n != 0 {
|
||||
p.n = 0
|
||||
return p.tok
|
||||
}
|
||||
|
||||
// Otherwise read the next token from the scanner and Save it to the buffer
|
||||
// in case we unscan later.
|
||||
prev := p.tok
|
||||
p.tok = p.sc.Scan()
|
||||
|
||||
if p.tok.Type == token.COMMENT {
|
||||
var comment *ast.CommentGroup
|
||||
var endline int
|
||||
|
||||
// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
|
||||
// p.tok.Pos.Line, prev.Pos.Line, endline)
|
||||
if p.tok.Pos.Line == prev.Pos.Line {
|
||||
// The comment is on same line as the previous token; it
|
||||
// cannot be a lead comment but may be a line comment.
|
||||
comment, endline = p.consumeCommentGroup(0)
|
||||
if p.tok.Pos.Line != endline {
|
||||
// The next token is on a different line, thus
|
||||
// the last comment group is a line comment.
|
||||
p.lineComment = comment
|
||||
}
|
||||
}
|
||||
|
||||
// consume successor comments, if any
|
||||
endline = -1
|
||||
for p.tok.Type == token.COMMENT {
|
||||
comment, endline = p.consumeCommentGroup(1)
|
||||
}
|
||||
|
||||
if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
|
||||
switch p.tok.Type {
|
||||
case token.RBRACE, token.RBRACK:
|
||||
// Do not count for these cases
|
||||
default:
|
||||
// The next token is following on the line immediately after the
|
||||
// comment group, thus the last comment group is a lead comment.
|
||||
p.leadComment = comment
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return p.tok
|
||||
}
|
||||
|
||||
// unscan pushes the previously read token back onto the buffer.
|
||||
func (p *Parser) unscan() {
|
||||
p.n = 1
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parsing support
|
||||
|
||||
func (p *Parser) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
|
||||
|
||||
i := 2 * p.indent
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
func trace(p *Parser, msg string) *Parser {
|
||||
p.printTrace(msg, "(")
|
||||
p.indent++
|
||||
return p
|
||||
}
|
||||
|
||||
// Usage pattern: defer un(trace(p, "..."))
|
||||
func un(p *Parser) {
|
||||
p.indent--
|
||||
p.printTrace(")")
|
||||
}
|
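A minimal sketch of driving the parser above by hand: parse a hypothetical snippet, assert the root node to *ast.ObjectList, and use Filter to select items by key prefix. The service/port names are made up for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
service "web" {
  port = 8080
}
`)

	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	// The root node produced by Parse is an *ast.ObjectList.
	list, ok := f.Node.(*ast.ObjectList)
	if !ok {
		log.Fatalf("unexpected root node type %T", f.Node)
	}

	// Filter strips the matched prefix, so the remaining key here is "web".
	for _, item := range list.Filter("service").Items {
		fmt.Println("remaining keys:", len(item.Keys))
	}
}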
645 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go generated vendored Normal file
@@ -0,0 +1,645 @@
// Package scanner implements a scanner for HCL (HashiCorp Configuration
|
||||
// Language) source text.
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader.
|
||||
const eof = rune(0)
|
||||
|
||||
// Scanner defines a lexical scanner
|
||||
type Scanner struct {
|
||||
buf *bytes.Buffer // Source buffer for advancing and scanning
|
||||
src []byte // Source buffer for immutable access
|
||||
|
||||
// Source Position
|
||||
srcPos token.Pos // current position
|
||||
prevPos token.Pos // previous position, used for peek() method
|
||||
|
||||
lastCharLen int // length of last character in bytes
|
||||
lastLineLen int // length of last line in characters (for correct column reporting)
|
||||
|
||||
tokStart int // token text start position
|
||||
tokEnd int // token text end position
|
||||
|
||||
// Error is called for each error encountered. If no Error
|
||||
// function is set, the error is reported to os.Stderr.
|
||||
Error func(pos token.Pos, msg string)
|
||||
|
||||
// ErrorCount is incremented by one for each error encountered.
|
||||
ErrorCount int
|
||||
|
||||
// tokPos is the start position of most recently scanned token; set by
|
||||
// Scan. The Filename field is always left untouched by the Scanner. If
|
||||
// an error is reported (via Error) and Position is invalid, the scanner is
|
||||
// not inside a token.
|
||||
tokPos token.Pos
|
||||
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
|
||||
// its source content.
|
||||
func New(src []byte) *Scanner {
|
||||
// even though we accept a src, we read from a io.Reader compatible type
|
||||
// (*bytes.Buffer). So in the future we might easily change it to streaming
|
||||
// read.
|
||||
b := bytes.NewBuffer(src)
|
||||
s := &Scanner{
|
||||
buf: b,
|
||||
src: src,
|
||||
}
|
||||
|
||||
// srcPosition always starts with 1
|
||||
s.srcPos.Line = 1
|
||||
return s
|
||||
}
|
||||
|
||||
// next reads the next rune from the buffered reader. Returns the rune(0) if
|
||||
// an error occurs (or io.EOF is returned).
|
||||
func (s *Scanner) next() rune {
|
||||
ch, size, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
// advance for error reporting
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
return eof
|
||||
}
|
||||
|
||||
if ch == utf8.RuneError && size == 1 {
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
s.err("illegal UTF-8 encoding")
|
||||
return ch
|
||||
}
|
||||
|
||||
// remember last position
|
||||
s.prevPos = s.srcPos
|
||||
|
||||
s.srcPos.Column++
|
||||
s.lastCharLen = size
|
||||
s.srcPos.Offset += size
|
||||
|
||||
if ch == '\n' {
|
||||
s.srcPos.Line++
|
||||
s.lastLineLen = s.srcPos.Column
|
||||
s.srcPos.Column = 0
|
||||
}
|
||||
|
||||
// debug
|
||||
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
|
||||
return ch
|
||||
}
|
||||
|
||||
// unread unreads the previous read Rune and updates the source position
|
||||
func (s *Scanner) unread() {
|
||||
if err := s.buf.UnreadRune(); err != nil {
|
||||
panic(err) // this is user fault, we should catch it
|
||||
}
|
||||
s.srcPos = s.prevPos // put back last position
|
||||
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
|
||||
func (s *Scanner) peek() rune {
|
||||
peek, _, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
return eof
|
||||
}
|
||||
|
||||
s.buf.UnreadRune()
|
||||
return peek
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token.
|
||||
func (s *Scanner) Scan() token.Token {
|
||||
ch := s.next()
|
||||
|
||||
// skip white space
|
||||
for isWhitespace(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
var tok token.Type
|
||||
|
||||
// token text markings
|
||||
s.tokStart = s.srcPos.Offset - s.lastCharLen
|
||||
|
||||
// token position, initial next() is moving the offset by one(size of rune
|
||||
// actually), though we are interested with the starting point
|
||||
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
if s.srcPos.Column > 0 {
|
||||
// common case: last character was not a '\n'
|
||||
s.tokPos.Line = s.srcPos.Line
|
||||
s.tokPos.Column = s.srcPos.Column
|
||||
} else {
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
s.tokPos.Line = s.srcPos.Line - 1
|
||||
s.tokPos.Column = s.lastLineLen
|
||||
}
|
||||
|
||||
switch {
|
||||
case isLetter(ch):
|
||||
tok = token.IDENT
|
||||
lit := s.scanIdentifier()
|
||||
if lit == "true" || lit == "false" {
|
||||
tok = token.BOOL
|
||||
}
|
||||
case isDecimal(ch):
|
||||
tok = s.scanNumber(ch)
|
||||
default:
|
||||
switch ch {
|
||||
case eof:
|
||||
tok = token.EOF
|
||||
case '"':
|
||||
tok = token.STRING
|
||||
s.scanString()
|
||||
case '#', '/':
|
||||
tok = token.COMMENT
|
||||
s.scanComment(ch)
|
||||
case '.':
|
||||
tok = token.PERIOD
|
||||
ch = s.peek()
|
||||
if isDecimal(ch) {
|
||||
tok = token.FLOAT
|
||||
ch = s.scanMantissa(ch)
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
case '<':
|
||||
tok = token.HEREDOC
|
||||
s.scanHeredoc()
|
||||
case '[':
|
||||
tok = token.LBRACK
|
||||
case ']':
|
||||
tok = token.RBRACK
|
||||
case '{':
|
||||
tok = token.LBRACE
|
||||
case '}':
|
||||
tok = token.RBRACE
|
||||
case ',':
|
||||
tok = token.COMMA
|
||||
case '=':
|
||||
tok = token.ASSIGN
|
||||
case '+':
|
||||
tok = token.ADD
|
||||
case '-':
|
||||
if isDecimal(s.peek()) {
|
||||
ch := s.next()
|
||||
tok = s.scanNumber(ch)
|
||||
} else {
|
||||
tok = token.SUB
|
||||
}
|
||||
default:
|
||||
s.err("illegal char")
|
||||
}
|
||||
}
|
||||
|
||||
// finish token ending
|
||||
s.tokEnd = s.srcPos.Offset
|
||||
|
||||
// create token literal
|
||||
var tokenText string
|
||||
if s.tokStart >= 0 {
|
||||
tokenText = string(s.src[s.tokStart:s.tokEnd])
|
||||
}
|
||||
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
|
||||
|
||||
return token.Token{
|
||||
Type: tok,
|
||||
Pos: s.tokPos,
|
||||
Text: tokenText,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scanner) scanComment(ch rune) {
|
||||
// single line comments
|
||||
if ch == '#' || (ch == '/' && s.peek() != '*') {
|
||||
if ch == '/' && s.peek() != '/' {
|
||||
s.err("expected '/' for comment")
|
||||
return
|
||||
}
|
||||
|
||||
ch = s.next()
|
||||
for ch != '\n' && ch >= 0 && ch != eof {
|
||||
ch = s.next()
|
||||
}
|
||||
if ch != eof && ch >= 0 {
|
||||
s.unread()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// be sure we get the character after /*. This allows us to find comments
|
||||
// that are not terminated
|
||||
if ch == '/' {
|
||||
s.next()
|
||||
ch = s.next() // read character after "/*"
|
||||
}
|
||||
|
||||
// look for /* - style comments
|
||||
for {
|
||||
if ch < 0 || ch == eof {
|
||||
s.err("comment not terminated")
|
||||
break
|
||||
}
|
||||
|
||||
ch0 := ch
|
||||
ch = s.next()
|
||||
if ch0 == '*' && ch == '/' {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanNumber scans a HCL number definition starting with the given rune
|
||||
func (s *Scanner) scanNumber(ch rune) token.Type {
|
||||
if ch == '0' {
|
||||
// check for hexadecimal, octal or float
|
||||
ch = s.next()
|
||||
if ch == 'x' || ch == 'X' {
|
||||
// hexadecimal
|
||||
ch = s.next()
|
||||
found := false
|
||||
for isHexadecimal(ch) {
|
||||
ch = s.next()
|
||||
found = true
|
||||
}
|
||||
|
||||
if !found {
|
||||
s.err("illegal hexadecimal number")
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// now it's either something like: 0421(octal) or 0.1231(float)
|
||||
illegalOctal := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
if ch == '8' || ch == '9' {
|
||||
// this is just a possibility. For example 0159 is illegal, but
|
||||
// 0159.23 is valid. So we mark a possible illegal octal. If
|
||||
// the next character is not a period, we'll print the error.
|
||||
illegalOctal = true
|
||||
}
|
||||
}
|
||||
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if illegalOctal {
|
||||
s.err("illegal octal number")
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
s.scanMantissa(ch)
|
||||
ch = s.next() // seek forward
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// scanMantissa scans the mantissa beginning from the rune. It returns the next
|
||||
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
|
||||
func (s *Scanner) scanMantissa(ch rune) rune {
|
||||
scanned := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
scanned = true
|
||||
}
|
||||
|
||||
if scanned && ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune
|
||||
func (s *Scanner) scanFraction(ch rune) rune {
|
||||
if ch == '.' {
|
||||
ch = s.peek() // we peek just to see if we can move forward
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
|
||||
// rune.
|
||||
func (s *Scanner) scanExponent(ch rune) rune {
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
if ch == '-' || ch == '+' {
|
||||
ch = s.next()
|
||||
}
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanHeredoc scans a heredoc string
|
||||
func (s *Scanner) scanHeredoc() {
|
||||
// Scan the second '<' in example: '<<EOF'
|
||||
if s.next() != '<' {
|
||||
s.err("heredoc expected second '<', didn't see it")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the original offset so we can read just the heredoc ident
|
||||
offs := s.srcPos.Offset
|
||||
|
||||
// Scan the identifier
|
||||
ch := s.next()
|
||||
|
||||
// Indented heredoc syntax
|
||||
if ch == '-' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
for isLetter(ch) || isDigit(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
// If we reached an EOF then that is not good
|
||||
if ch == eof {
|
||||
s.err("heredoc not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore the '\r' in Windows line endings
|
||||
if ch == '\r' {
|
||||
if s.peek() == '\n' {
|
||||
ch = s.next()
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't reach a newline then that is also not good
|
||||
if ch != '\n' {
|
||||
s.err("invalid characters in heredoc anchor")
|
||||
return
|
||||
}
|
||||
|
||||
// Read the identifier
|
||||
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
|
||||
if len(identBytes) == 0 {
|
||||
s.err("zero-length heredoc anchor")
|
||||
return
|
||||
}
|
||||
|
||||
var identRegexp *regexp.Regexp
|
||||
if identBytes[0] == '-' {
|
||||
identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
|
||||
} else {
|
||||
identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
|
||||
}
|
||||
|
||||
// Read the actual string value
|
||||
lineStart := s.srcPos.Offset
|
||||
for {
|
||||
ch := s.next()
|
||||
|
||||
// Special newline handling.
|
||||
if ch == '\n' {
|
||||
// Math is fast, so we first compare the byte counts to see if we have a chance
|
||||
// of seeing the same identifier - if the length is less than the number of bytes
|
||||
// in the identifier, this cannot be a valid terminator.
|
||||
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
|
||||
if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
|
||||
break
|
||||
}
|
||||
|
||||
// Not an anchor match, record the start of a new line
|
||||
lineStart = s.srcPos.Offset
|
||||
}
|
||||
|
||||
if ch == eof {
|
||||
s.err("heredoc not terminated")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scanString scans a quoted string
|
||||
func (s *Scanner) scanString() {
|
||||
braces := 0
|
||||
for {
|
||||
// '"' opening already consumed
|
||||
// read character after quote
|
||||
ch := s.next()
|
||||
|
||||
if ch < 0 || ch == eof {
|
||||
s.err("literal not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
if ch == '"' && braces == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// If we're going into a ${} then we can ignore quotes for awhile
|
||||
if braces == 0 && ch == '$' && s.peek() == '{' {
|
||||
braces++
|
||||
s.next()
|
||||
} else if braces > 0 && ch == '{' {
|
||||
braces++
|
||||
}
|
||||
if braces > 0 && ch == '}' {
|
||||
braces--
|
||||
}
|
||||
|
||||
if ch == '\\' {
|
||||
s.scanEscape()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scanEscape scans an escape sequence
|
||||
func (s *Scanner) scanEscape() rune {
|
||||
// http://en.cppreference.com/w/cpp/language/escape
|
||||
ch := s.next() // read character after '/'
|
||||
switch ch {
|
||||
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
|
||||
// nothing to do
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
// octal notation
|
||||
ch = s.scanDigits(ch, 8, 3)
|
||||
case 'x':
|
||||
// hexadecimal notation
|
||||
ch = s.scanDigits(s.next(), 16, 2)
|
||||
case 'u':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 4)
|
||||
case 'U':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 8)
|
||||
default:
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanDigits scans a rune with the given base up to n times. For example, an
|
||||
// octal escape such as \072 would result in a call to scanDigits(ch, 8, 3)
|
||||
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
|
||||
start := n
|
||||
for n > 0 && digitVal(ch) < base {
|
||||
ch = s.next()
|
||||
if ch == eof {
|
||||
// If we see an EOF, we halt any more scanning of digits
|
||||
// immediately.
|
||||
break
|
||||
}
|
||||
|
||||
n--
|
||||
}
|
||||
if n > 0 {
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
|
||||
if n != start {
|
||||
// we scanned all digits, put the last non digit char back,
|
||||
// only if we read anything at all
|
||||
s.unread()
|
||||
}
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanIdentifier scans an identifier and returns the literal string
|
||||
func (s *Scanner) scanIdentifier() string {
|
||||
offs := s.srcPos.Offset - s.lastCharLen
|
||||
ch := s.next()
|
||||
for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread() // we got identifier, put back latest char
|
||||
}
|
||||
|
||||
return string(s.src[offs:s.srcPos.Offset])
|
||||
}
|
||||
|
||||
// recentPosition returns the position of the character immediately after the
|
||||
// character or token returned by the last call to Scan.
|
||||
func (s *Scanner) recentPosition() (pos token.Pos) {
|
||||
pos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
switch {
|
||||
case s.srcPos.Column > 0:
|
||||
// common case: last character was not a '\n'
|
||||
pos.Line = s.srcPos.Line
|
||||
pos.Column = s.srcPos.Column
|
||||
case s.lastLineLen > 0:
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
pos.Line = s.srcPos.Line - 1
|
||||
pos.Column = s.lastLineLen
|
||||
default:
|
||||
// at the beginning of the source
|
||||
pos.Line = 1
|
||||
pos.Column = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// err reports a scanning error to the s.Error function. If the function is
|
||||
// not defined, by default it prints them to os.Stderr
|
||||
func (s *Scanner) err(msg string) {
|
||||
s.ErrorCount++
|
||||
pos := s.recentPosition()
|
||||
|
||||
if s.Error != nil {
|
||||
s.Error(pos, msg)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
// isLetter returns true if the given rune is a letter
|
||||
func isLetter(ch rune) bool {
|
||||
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
|
||||
}
|
||||
|
||||
// isDigit returns true if the given rune is a decimal digit
|
||||
func isDigit(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
|
||||
}
|
||||
|
||||
// isDecimal returns true if the given rune is a decimal number
|
||||
func isDecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9'
|
||||
}
|
||||
|
||||
// isHexadecimal returns true if the given rune is an hexadecimal number
|
||||
func isHexadecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
|
||||
}
|
||||
|
||||
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
|
||||
func isWhitespace(ch rune) bool {
|
||||
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
|
||||
}
|
||||
|
||||
// digitVal returns the integer value of a given octal,decimal or hexadecimal rune
|
||||
func digitVal(ch rune) int {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9':
|
||||
return int(ch - '0')
|
||||
case 'a' <= ch && ch <= 'f':
|
||||
return int(ch - 'a' + 10)
|
||||
case 'A' <= ch && ch <= 'F':
|
||||
return int(ch - 'A' + 10)
|
||||
}
|
||||
return 16 // larger than any legal digit val
|
||||
}
|
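A minimal sketch of using the scanner above on its own, with a hypothetical one-line input; it prints each token until EOF.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	s := scanner.New([]byte(`name = "korvike"`))

	// Scan emits IDENT, ASSIGN and STRING tokens for the line above,
	// followed by EOF.
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		fmt.Printf("%v %s %q\n", tok.Pos, tok.Type, tok.Text)
	}
}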
244 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go generated vendored Normal file
@@ -0,0 +1,244 @@
package strconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ErrSyntax indicates that a value does not have the right syntax for the target type.
|
||||
var ErrSyntax = errors.New("invalid syntax")
|
||||
|
||||
// Unquote interprets s as a single-quoted, double-quoted,
|
||||
// or backquoted Go string literal, returning the string value
|
||||
// that s quotes. (If s is single-quoted, it would be a Go
|
||||
// character literal; Unquote returns the corresponding
|
||||
// one-character string.)
|
||||
func Unquote(s string) (t string, err error) {
|
||||
n := len(s)
|
||||
if n < 2 {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
quote := s[0]
|
||||
if quote != s[n-1] {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
s = s[1 : n-1]
|
||||
|
||||
if quote != '"' {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
// Is it trivial? Avoid allocation.
|
||||
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
|
||||
switch quote {
|
||||
case '"':
|
||||
return s, nil
|
||||
case '\'':
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if size == len(s) && (r != utf8.RuneError || size != 1) {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var runeTmp [utf8.UTFMax]byte
|
||||
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
|
||||
for len(s) > 0 {
|
||||
// If we're starting a '${}' then let it through un-unquoted.
|
||||
// Specifically: we don't unquote any characters within the `${}`
|
||||
// section, except for escaped backslashes, which we handle specifically.
|
||||
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
|
||||
buf = append(buf, '$', '{')
|
||||
s = s[2:]
|
||||
|
||||
// Continue reading until we find the closing brace, copying as-is
|
||||
braces := 1
|
||||
for len(s) > 0 && braces > 0 {
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
s = s[size:]
|
||||
|
||||
// We special case escaped backslashes in interpolations, converting
|
||||
// them to their unescaped equivalents.
|
||||
if r == '\\' {
|
||||
q, _ := utf8.DecodeRuneInString(s)
|
||||
switch q {
|
||||
case '\\':
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
n := utf8.EncodeRune(runeTmp[:], r)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
|
||||
switch r {
|
||||
case '{':
|
||||
braces++
|
||||
case '}':
|
||||
braces--
|
||||
}
|
||||
}
|
||||
if braces != 0 {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
if len(s) == 0 {
|
||||
// If there's no string left, we're done!
|
||||
break
|
||||
} else {
|
||||
// If there's more left, we need to pop back up to the top of the loop
|
||||
// in case there's another interpolation in this string.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
c, multibyte, ss, err := unquoteChar(s, quote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s = ss
|
||||
if c < utf8.RuneSelf || !multibyte {
|
||||
buf = append(buf, byte(c))
|
||||
} else {
|
||||
n := utf8.EncodeRune(runeTmp[:], c)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
}
|
||||
if quote == '\'' && len(s) != 0 {
|
||||
// single-quoted must be single character
|
||||
return "", ErrSyntax
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// contains reports whether the string contains the byte c.
|
||||
func contains(s string, c byte) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unhex(b byte) (v rune, ok bool) {
|
||||
c := rune(b)
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0', true
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10, true
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
|
||||
// easy cases
|
||||
switch c := s[0]; {
|
||||
case c == quote && (quote == '\'' || quote == '"'):
|
||||
err = ErrSyntax
|
||||
return
|
||||
case c >= utf8.RuneSelf:
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
return r, true, s[size:], nil
|
||||
case c != '\\':
|
||||
return rune(s[0]), false, s[1:], nil
|
||||
}
|
||||
|
||||
// hard case: c is backslash
|
||||
if len(s) <= 1 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
c := s[1]
|
||||
s = s[2:]
|
||||
|
||||
switch c {
|
||||
case 'a':
|
||||
value = '\a'
|
||||
case 'b':
|
||||
value = '\b'
|
||||
case 'f':
|
||||
value = '\f'
|
||||
case 'n':
|
||||
value = '\n'
|
||||
case 'r':
|
||||
value = '\r'
|
||||
case 't':
|
||||
value = '\t'
|
||||
case 'v':
|
||||
value = '\v'
|
||||
case 'x', 'u', 'U':
|
||||
n := 0
|
||||
switch c {
|
||||
case 'x':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
var v rune
|
||||
if len(s) < n {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < n; j++ {
|
||||
x, ok := unhex(s[j])
|
||||
if !ok {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
v = v<<4 | x
|
||||
}
|
||||
s = s[n:]
|
||||
if c == 'x' {
|
||||
// single-byte string, possibly not UTF-8
|
||||
value = v
|
||||
break
|
||||
}
|
||||
if v > utf8.MaxRune {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
multibyte = true
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
v := rune(c) - '0'
|
||||
if len(s) < 2 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < 2; j++ { // one digit already; two more
|
||||
x := rune(s[j]) - '0'
|
||||
if x < 0 || x > 7 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
v = (v << 3) | x
|
||||
}
|
||||
s = s[2:]
|
||||
if v > 255 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
case '\\':
|
||||
value = '\\'
|
||||
case '\'', '"':
|
||||
if c != quote {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = rune(c)
|
||||
default:
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
tail = s
|
||||
return
|
||||
}
|
46
vendor/github.com/hashicorp/hcl/hcl/token/position.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
package token
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Pos describes an arbitrary source position
|
||||
// including the file, line, and column location.
|
||||
// A Pos is valid if the line number is > 0.
|
||||
type Pos struct {
|
||||
Filename string // filename, if any
|
||||
Offset int // offset, starting at 0
|
||||
Line int // line number, starting at 1
|
||||
Column int // column number, starting at 1 (character count)
|
||||
}
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||
|
||||
// String returns a string in one of several forms:
|
||||
//
|
||||
// file:line:column valid position with file name
|
||||
// line:column valid position without file name
|
||||
// file invalid position with file name
|
||||
// - invalid position without file name
|
||||
func (p Pos) String() string {
|
||||
s := p.Filename
|
||||
if p.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Before reports whether the position p is before u.
|
||||
func (p Pos) Before(u Pos) bool {
|
||||
return u.Offset > p.Offset || u.Line > p.Line
|
||||
}
|
||||
|
||||
// After reports whether the position p is after u.
|
||||
func (p Pos) After(u Pos) bool {
|
||||
return u.Offset < p.Offset || u.Line < p.Line
|
||||
}
|
219
vendor/github.com/hashicorp/hcl/hcl/token/token.go
generated
vendored
Normal file
|
@ -0,0 +1,219 @@
|
|||
// Package token defines constants representing the lexical tokens for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
|
||||
type Token struct {
|
||||
Type Type
|
||||
Pos Pos
|
||||
Text string
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
|
||||
type Type int
|
||||
|
||||
const (
|
||||
// Special tokens
|
||||
ILLEGAL Type = iota
|
||||
EOF
|
||||
COMMENT
|
||||
|
||||
identifier_beg
|
||||
IDENT // literals
|
||||
literal_beg
|
||||
NUMBER // 12345
|
||||
FLOAT // 123.45
|
||||
BOOL // true,false
|
||||
STRING // "abc"
|
||||
HEREDOC // <<FOO\nbar\nFOO
|
||||
literal_end
|
||||
identifier_end
|
||||
|
||||
operator_beg
|
||||
LBRACK // [
|
||||
LBRACE // {
|
||||
COMMA // ,
|
||||
PERIOD // .
|
||||
|
||||
RBRACK // ]
|
||||
RBRACE // }
|
||||
|
||||
ASSIGN // =
|
||||
ADD // +
|
||||
SUB // -
|
||||
operator_end
|
||||
)
|
||||
|
||||
var tokens = [...]string{
|
||||
ILLEGAL: "ILLEGAL",
|
||||
|
||||
EOF: "EOF",
|
||||
COMMENT: "COMMENT",
|
||||
|
||||
IDENT: "IDENT",
|
||||
NUMBER: "NUMBER",
|
||||
FLOAT: "FLOAT",
|
||||
BOOL: "BOOL",
|
||||
STRING: "STRING",
|
||||
|
||||
LBRACK: "LBRACK",
|
||||
LBRACE: "LBRACE",
|
||||
COMMA: "COMMA",
|
||||
PERIOD: "PERIOD",
|
||||
HEREDOC: "HEREDOC",
|
||||
|
||||
RBRACK: "RBRACK",
|
||||
RBRACE: "RBRACE",
|
||||
|
||||
ASSIGN: "ASSIGN",
|
||||
ADD: "ADD",
|
||||
SUB: "SUB",
|
||||
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns a formatted representation of the token consisting of its
|
||||
// position, type, and literal text. The literal text is only meaningful for
|
||||
// certain token types, such as token.IDENT, token.STRING, etc.
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// Value returns the properly typed value for this token. The type of
|
||||
// the returned interface{} is guaranteed based on the Type field.
|
||||
//
|
||||
// This can only be called for literal types. If it is called for any other
|
||||
// type, this will panic.
|
||||
func (t Token) Value() interface{} {
|
||||
switch t.Type {
|
||||
case BOOL:
|
||||
if t.Text == "true" {
|
||||
return true
|
||||
} else if t.Text == "false" {
|
||||
return false
|
||||
}
|
||||
|
||||
panic("unknown bool value: " + t.Text)
|
||||
case FLOAT:
|
||||
v, err := strconv.ParseFloat(t.Text, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return float64(v)
|
||||
case NUMBER:
|
||||
v, err := strconv.ParseInt(t.Text, 0, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return int64(v)
|
||||
case IDENT:
|
||||
return t.Text
|
||||
case HEREDOC:
|
||||
return unindentHeredoc(t.Text)
|
||||
case STRING:
|
||||
// Determine the Unquote method to use. If it came from JSON,
|
||||
// then we need to use the built-in unquote since we have to
|
||||
// escape interpolations there.
|
||||
f := hclstrconv.Unquote
|
||||
if t.JSON {
|
||||
f = strconv.Unquote
|
||||
}
|
||||
|
||||
// This case occurs if json null is used
|
||||
if t.Text == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
v, err := f(t.Text)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
|
||||
}
|
||||
|
||||
return v
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
|
||||
}
|
||||
}
|
||||
|
||||
// unindentHeredoc returns the string content of a HEREDOC as-is if it is
|
||||
// started with <<. If it is started with <<- and the terminating line is at
|
||||
// least as indented as the least indented line, the hanging indent is removed.
|
||||
func unindentHeredoc(heredoc string) string {
|
||||
// We need to find the end of the marker
|
||||
idx := strings.IndexByte(heredoc, '\n')
|
||||
if idx == -1 {
|
||||
panic("heredoc doesn't contain newline")
|
||||
}
|
||||
|
||||
unindent := heredoc[2] == '-'
|
||||
|
||||
// We can optimize if the heredoc isn't marked for indentation
|
||||
if !unindent {
|
||||
return string(heredoc[idx+1 : len(heredoc)-idx+1])
|
||||
}
|
||||
|
||||
// We need to unindent each line based on the indentation level of the marker
|
||||
lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
|
||||
whitespacePrefix := lines[len(lines)-1]
|
||||
|
||||
isIndented := true
|
||||
for _, v := range lines {
|
||||
if strings.HasPrefix(v, whitespacePrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
isIndented = false
|
||||
break
|
||||
}
|
||||
|
||||
// If all lines are not at least as indented as the terminating mark, return the
|
||||
// heredoc as is, but trim the leading space from the marker on the final line.
|
||||
if !isIndented {
|
||||
return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
|
||||
}
|
||||
|
||||
unindentedLines := make([]string, len(lines))
|
||||
for k, v := range lines {
|
||||
if k == len(lines)-1 {
|
||||
unindentedLines[k] = ""
|
||||
break
|
||||
}
|
||||
|
||||
unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
|
||||
}
|
||||
|
||||
return strings.Join(unindentedLines, "\n")
|
||||
}
|
111
vendor/github.com/hashicorp/hcl/json/parser/flatten.go
generated
vendored
Normal file
|
@ -0,0 +1,111 @@
|
|||
package parser
|
||||
|
||||
import "github.com/hashicorp/hcl/hcl/ast"
|
||||
|
||||
// flattenObjects takes an AST node, walks it, and flattens nested objects.
|
||||
func flattenObjects(node ast.Node) {
|
||||
ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
|
||||
// We only care about lists, because this is what we modify
|
||||
list, ok := n.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return n, true
|
||||
}
|
||||
|
||||
// Rebuild the item list
|
||||
items := make([]*ast.ObjectItem, 0, len(list.Items))
|
||||
frontier := make([]*ast.ObjectItem, len(list.Items))
|
||||
copy(frontier, list.Items)
|
||||
for len(frontier) > 0 {
|
||||
// Pop the current item
|
||||
n := len(frontier)
|
||||
item := frontier[n-1]
|
||||
frontier = frontier[:n-1]
|
||||
|
||||
switch v := item.Val.(type) {
|
||||
case *ast.ObjectType:
|
||||
items, frontier = flattenObjectType(v, item, items, frontier)
|
||||
case *ast.ListType:
|
||||
items, frontier = flattenListType(v, item, items, frontier)
|
||||
default:
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
|
||||
// Reverse the list since the frontier model runs things backwards
|
||||
for i := len(items)/2 - 1; i >= 0; i-- {
|
||||
opp := len(items) - 1 - i
|
||||
items[i], items[opp] = items[opp], items[i]
|
||||
}
|
||||
|
||||
// Done! Set the original items
|
||||
list.Items = items
|
||||
return n, true
|
||||
})
|
||||
}
|
||||
|
||||
func flattenListType(
|
||||
ot *ast.ListType,
|
||||
item *ast.ObjectItem,
|
||||
items []*ast.ObjectItem,
|
||||
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
|
||||
// All the elements of this object must also be objects!
|
||||
for _, subitem := range ot.List {
|
||||
if _, ok := subitem.(*ast.ObjectType); !ok {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
}
|
||||
|
||||
// Great! We have a match; go through all the items and flatten them
|
||||
for _, elem := range ot.List {
|
||||
// Add it to the frontier so that we can recurse
|
||||
frontier = append(frontier, &ast.ObjectItem{
|
||||
Keys: item.Keys,
|
||||
Assign: item.Assign,
|
||||
Val: elem,
|
||||
LeadComment: item.LeadComment,
|
||||
LineComment: item.LineComment,
|
||||
})
|
||||
}
|
||||
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
func flattenObjectType(
|
||||
ot *ast.ObjectType,
|
||||
item *ast.ObjectItem,
|
||||
items []*ast.ObjectItem,
|
||||
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
|
||||
// If the list has no items we do not have to flatten anything
|
||||
if ot.List.Items == nil {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
// All the elements of this object must also be objects!
|
||||
for _, subitem := range ot.List.Items {
|
||||
if _, ok := subitem.Val.(*ast.ObjectType); !ok {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
}
|
||||
|
||||
// Great! We have a match; go through all the items and flatten them
|
||||
for _, subitem := range ot.List.Items {
|
||||
// Copy the new key
|
||||
keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
|
||||
copy(keys, item.Keys)
|
||||
copy(keys[len(item.Keys):], subitem.Keys)
|
||||
|
||||
// Add it to the frontier so that we can recurse
|
||||
frontier = append(frontier, &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
Assign: item.Assign,
|
||||
Val: subitem.Val,
|
||||
LeadComment: item.LeadComment,
|
||||
LineComment: item.LineComment,
|
||||
})
|
||||
}
|
||||
|
||||
return items, frontier
|
||||
}
|
303
vendor/github.com/hashicorp/hcl/json/parser/parser.go
generated
vendored
Normal file
|
@ -0,0 +1,303 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/json/scanner"
|
||||
"github.com/hashicorp/hcl/json/token"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
sc *scanner.Scanner
|
||||
|
||||
// Last read token
|
||||
tok token.Token
|
||||
commaPrev token.Token
|
||||
|
||||
enableTrace bool
|
||||
indent int
|
||||
n int // buffer size (max = 1)
|
||||
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
var errEofToken = errors.New("EOF token found")
|
||||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func (p *Parser) Parse() (*ast.File, error) {
|
||||
f := &ast.File{}
|
||||
var err, scerr error
|
||||
p.sc.Error = func(pos token.Pos, msg string) {
|
||||
scerr = fmt.Errorf("%s: %s", pos, msg)
|
||||
}
|
||||
|
||||
// The root must be an object in JSON
|
||||
object, err := p.object()
|
||||
if scerr != nil {
|
||||
return nil, scerr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We make our final node an object list so it is more HCL compatible
|
||||
f.Node = object.List
|
||||
|
||||
// Flatten it, which finds patterns and turns them into more HCL-like
|
||||
// AST trees.
|
||||
flattenObjects(f.Node)
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (p *Parser) objectList() (*ast.ObjectList, error) {
|
||||
defer un(trace(p, "ParseObjectList"))
|
||||
node := &ast.ObjectList{}
|
||||
|
||||
for {
|
||||
n, err := p.objectItem()
|
||||
if err == errEofToken {
|
||||
break // we are finished
|
||||
}
|
||||
|
||||
// we don't return a nil node, because we might want to use the already
|
||||
// collected items.
|
||||
if err != nil {
|
||||
return node, err
|
||||
}
|
||||
|
||||
node.Add(n)
|
||||
|
||||
// Check for a followup comma. If it isn't a comma, then we're done
|
||||
if tok := p.scan(); tok.Type != token.COMMA {
|
||||
break
|
||||
}
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// objectItem parses a single object item
|
||||
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
|
||||
defer un(trace(p, "ParseObjectItem"))
|
||||
|
||||
keys, err := p.objectKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o := &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
}
|
||||
|
||||
switch p.tok.Type {
|
||||
case token.COLON:
|
||||
o.Val, err = p.objectValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// objectKey parses an object key and returns an ObjectKey AST
|
||||
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
||||
keyCount := 0
|
||||
keys := make([]*ast.ObjectKey, 0)
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
case token.STRING:
|
||||
keyCount++
|
||||
keys = append(keys, &ast.ObjectKey{
|
||||
Token: p.tok.HCLToken(),
|
||||
})
|
||||
case token.COLON:
|
||||
// If we have a zero keycount it means that we never got
|
||||
// an object key, i.e. `{ :`. This is a syntax error.
|
||||
if keyCount == 0 {
|
||||
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
|
||||
}
|
||||
|
||||
// Done
|
||||
return keys, nil
|
||||
case token.ILLEGAL:
|
||||
fmt.Println("illegal")
|
||||
default:
|
||||
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// objectValue parses any type of value, such as number, bool, string, object,
|
||||
// or list.
|
||||
func (p *Parser) objectValue() (ast.Node, error) {
|
||||
defer un(trace(p, "ParseObjectValue"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
|
||||
return p.literalType()
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.LBRACK:
|
||||
return p.listType()
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
|
||||
}
|
||||
|
||||
// object parses the top-level value, which for JSON must be an object
|
||||
// type.
|
||||
func (p *Parser) object() (*ast.ObjectType, error) {
|
||||
defer un(trace(p, "ParseType"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
|
||||
}
|
||||
|
||||
// objectType parses an object type and returns an ObjectType AST
|
||||
func (p *Parser) objectType() (*ast.ObjectType, error) {
|
||||
defer un(trace(p, "ParseObjectType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACE
|
||||
o := &ast.ObjectType{}
|
||||
|
||||
l, err := p.objectList()
|
||||
|
||||
// if we hit RBRACE, we are good to go (it means we parsed all items); if it's
|
||||
// not an RBRACE, it's a syntax error and we just return it.
|
||||
if err != nil && p.tok.Type != token.RBRACE {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.List = l
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listType parses a list type and returns a ListType AST
|
||||
func (p *Parser) listType() (*ast.ListType, error) {
|
||||
defer un(trace(p, "ParseListType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACK
|
||||
l := &ast.ListType{}
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.STRING:
|
||||
node, err := p.literalType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
case token.COMMA:
|
||||
continue
|
||||
case token.LBRACE:
|
||||
node, err := p.objectType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
case token.BOOL:
|
||||
// TODO(arslan) should we support? not supported by HCL yet
|
||||
case token.LBRACK:
|
||||
// TODO(arslan) should we support nested lists? Even though it's
|
||||
// written in README of HCL, it's not a part of the grammar
|
||||
// (not defined in parse.y)
|
||||
case token.RBRACK:
|
||||
// finished
|
||||
return l, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// literalType parses a literal type and returns a LiteralType AST
|
||||
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||
defer un(trace(p, "ParseLiteral"))
|
||||
|
||||
return &ast.LiteralType{
|
||||
Token: p.tok.HCLToken(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scan returns the next token from the underlying scanner. If a token has
|
||||
// been unscanned then read that instead.
|
||||
func (p *Parser) scan() token.Token {
|
||||
// If we have a token on the buffer, then return it.
|
||||
if p.n != 0 {
|
||||
p.n = 0
|
||||
return p.tok
|
||||
}
|
||||
|
||||
p.tok = p.sc.Scan()
|
||||
return p.tok
|
||||
}
|
||||
|
||||
// unscan pushes the previously read token back onto the buffer.
|
||||
func (p *Parser) unscan() {
|
||||
p.n = 1
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parsing support
|
||||
|
||||
func (p *Parser) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
|
||||
|
||||
i := 2 * p.indent
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
func trace(p *Parser, msg string) *Parser {
|
||||
p.printTrace(msg, "(")
|
||||
p.indent++
|
||||
return p
|
||||
}
|
||||
|
||||
// Usage pattern: defer un(trace(p, "..."))
|
||||
func un(p *Parser) {
|
||||
p.indent--
|
||||
p.printTrace(")")
|
||||
}
|
451
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
generated
vendored
Normal file
|
@ -0,0 +1,451 @@
|
|||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/json/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader.
|
||||
const eof = rune(0)
|
||||
|
||||
// Scanner defines a lexical scanner
|
||||
type Scanner struct {
|
||||
buf *bytes.Buffer // Source buffer for advancing and scanning
|
||||
src []byte // Source buffer for immutable access
|
||||
|
||||
// Source Position
|
||||
srcPos token.Pos // current position
|
||||
prevPos token.Pos // previous position, used for peek() method
|
||||
|
||||
lastCharLen int // length of last character in bytes
|
||||
lastLineLen int // length of last line in characters (for correct column reporting)
|
||||
|
||||
tokStart int // token text start position
|
||||
tokEnd int // token text end position
|
||||
|
||||
// Error is called for each error encountered. If no Error
|
||||
// function is set, the error is reported to os.Stderr.
|
||||
Error func(pos token.Pos, msg string)
|
||||
|
||||
// ErrorCount is incremented by one for each error encountered.
|
||||
ErrorCount int
|
||||
|
||||
// tokPos is the start position of most recently scanned token; set by
|
||||
// Scan. The Filename field is always left untouched by the Scanner. If
|
||||
// an error is reported (via Error) and Position is invalid, the scanner is
|
||||
// not inside a token.
|
||||
tokPos token.Pos
|
||||
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
|
||||
// its source content.
|
||||
func New(src []byte) *Scanner {
|
||||
// even though we accept a src, we read from an io.Reader compatible type
|
||||
// (*bytes.Buffer). So in the future we might easily change it to a streaming
|
||||
// read.
|
||||
b := bytes.NewBuffer(src)
|
||||
s := &Scanner{
|
||||
buf: b,
|
||||
src: src,
|
||||
}
|
||||
|
||||
// srcPosition always starts with 1
|
||||
s.srcPos.Line = 1
|
||||
return s
|
||||
}
|
||||
|
||||
// next reads the next rune from the buffered reader. Returns the rune(0) if
|
||||
// an error occurs (or io.EOF is returned).
|
||||
func (s *Scanner) next() rune {
|
||||
ch, size, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
// advance for error reporting
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
return eof
|
||||
}
|
||||
|
||||
if ch == utf8.RuneError && size == 1 {
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
s.err("illegal UTF-8 encoding")
|
||||
return ch
|
||||
}
|
||||
|
||||
// remember last position
|
||||
s.prevPos = s.srcPos
|
||||
|
||||
s.srcPos.Column++
|
||||
s.lastCharLen = size
|
||||
s.srcPos.Offset += size
|
||||
|
||||
if ch == '\n' {
|
||||
s.srcPos.Line++
|
||||
s.lastLineLen = s.srcPos.Column
|
||||
s.srcPos.Column = 0
|
||||
}
|
||||
|
||||
// debug
|
||||
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
|
||||
return ch
|
||||
}
|
||||
|
||||
// unread unreads the previously read rune and updates the source position
|
||||
func (s *Scanner) unread() {
|
||||
if err := s.buf.UnreadRune(); err != nil {
|
||||
panic(err) // this is user fault, we should catch it
|
||||
}
|
||||
s.srcPos = s.prevPos // put back last position
|
||||
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
|
||||
func (s *Scanner) peek() rune {
|
||||
peek, _, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
return eof
|
||||
}
|
||||
|
||||
s.buf.UnreadRune()
|
||||
return peek
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token.
|
||||
func (s *Scanner) Scan() token.Token {
|
||||
ch := s.next()
|
||||
|
||||
// skip white space
|
||||
for isWhitespace(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
var tok token.Type
|
||||
|
||||
// token text markings
|
||||
s.tokStart = s.srcPos.Offset - s.lastCharLen
|
||||
|
||||
// token position: the initial next() moves the offset by one (the size of the
|
||||
// rune, actually), though we are interested in the starting point
|
||||
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
if s.srcPos.Column > 0 {
|
||||
// common case: last character was not a '\n'
|
||||
s.tokPos.Line = s.srcPos.Line
|
||||
s.tokPos.Column = s.srcPos.Column
|
||||
} else {
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
s.tokPos.Line = s.srcPos.Line - 1
|
||||
s.tokPos.Column = s.lastLineLen
|
||||
}
|
||||
|
||||
switch {
|
||||
case isLetter(ch):
|
||||
lit := s.scanIdentifier()
|
||||
if lit == "true" || lit == "false" {
|
||||
tok = token.BOOL
|
||||
} else if lit == "null" {
|
||||
tok = token.NULL
|
||||
} else {
|
||||
s.err("illegal char")
|
||||
}
|
||||
case isDecimal(ch):
|
||||
tok = s.scanNumber(ch)
|
||||
default:
|
||||
switch ch {
|
||||
case eof:
|
||||
tok = token.EOF
|
||||
case '"':
|
||||
tok = token.STRING
|
||||
s.scanString()
|
||||
case '.':
|
||||
tok = token.PERIOD
|
||||
ch = s.peek()
|
||||
if isDecimal(ch) {
|
||||
tok = token.FLOAT
|
||||
ch = s.scanMantissa(ch)
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
case '[':
|
||||
tok = token.LBRACK
|
||||
case ']':
|
||||
tok = token.RBRACK
|
||||
case '{':
|
||||
tok = token.LBRACE
|
||||
case '}':
|
||||
tok = token.RBRACE
|
||||
case ',':
|
||||
tok = token.COMMA
|
||||
case ':':
|
||||
tok = token.COLON
|
||||
case '-':
|
||||
if isDecimal(s.peek()) {
|
||||
ch := s.next()
|
||||
tok = s.scanNumber(ch)
|
||||
} else {
|
||||
s.err("illegal char")
|
||||
}
|
||||
default:
|
||||
s.err("illegal char: " + string(ch))
|
||||
}
|
||||
}
|
||||
|
||||
// finish token ending
|
||||
s.tokEnd = s.srcPos.Offset
|
||||
|
||||
// create token literal
|
||||
var tokenText string
|
||||
if s.tokStart >= 0 {
|
||||
tokenText = string(s.src[s.tokStart:s.tokEnd])
|
||||
}
|
||||
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
|
||||
|
||||
return token.Token{
|
||||
Type: tok,
|
||||
Pos: s.tokPos,
|
||||
Text: tokenText,
|
||||
}
|
||||
}
|
||||
|
||||
// scanNumber scans an HCL number definition starting with the given rune
|
||||
func (s *Scanner) scanNumber(ch rune) token.Type {
|
||||
zero := ch == '0'
|
||||
pos := s.srcPos
|
||||
|
||||
s.scanMantissa(ch)
|
||||
ch = s.next() // seek forward
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
|
||||
// If we have a larger number and this is zero, error
|
||||
if zero && pos != s.srcPos {
|
||||
s.err("numbers cannot start with 0")
|
||||
}
|
||||
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// scanMantissa scans the mantissa beginning from the rune. It returns the next
|
||||
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
|
||||
func (s *Scanner) scanMantissa(ch rune) rune {
|
||||
scanned := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
scanned = true
|
||||
}
|
||||
|
||||
if scanned && ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune
|
||||
func (s *Scanner) scanFraction(ch rune) rune {
|
||||
if ch == '.' {
|
||||
ch = s.peek() // we peek just to see if we can move forward
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
|
||||
// rune.
|
||||
func (s *Scanner) scanExponent(ch rune) rune {
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
if ch == '-' || ch == '+' {
|
||||
ch = s.next()
|
||||
}
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanString scans a quoted string
|
||||
func (s *Scanner) scanString() {
|
||||
braces := 0
|
||||
for {
|
||||
// '"' opening already consumed
|
||||
// read character after quote
|
||||
ch := s.next()
|
||||
|
||||
if ch == '\n' || ch < 0 || ch == eof {
|
||||
s.err("literal not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
if ch == '"' {
|
||||
break
|
||||
}
|
||||
|
||||
// If we're going into a ${} then we can ignore quotes for a while
|
||||
if braces == 0 && ch == '$' && s.peek() == '{' {
|
||||
braces++
|
||||
s.next()
|
||||
} else if braces > 0 && ch == '{' {
|
||||
braces++
|
||||
}
|
||||
if braces > 0 && ch == '}' {
|
||||
braces--
|
||||
}
|
||||
|
||||
if ch == '\\' {
|
||||
s.scanEscape()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scanEscape scans an escape sequence
|
||||
func (s *Scanner) scanEscape() rune {
|
||||
// http://en.cppreference.com/w/cpp/language/escape
|
||||
ch := s.next() // read character after '\'
|
||||
switch ch {
|
||||
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
|
||||
// nothing to do
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
// octal notation
|
||||
ch = s.scanDigits(ch, 8, 3)
|
||||
case 'x':
|
||||
// hexadecimal notation
|
||||
ch = s.scanDigits(s.next(), 16, 2)
|
||||
case 'u':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 4)
|
||||
case 'U':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 8)
|
||||
default:
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanDigits scans a rune with the given base for up to n times. For example,
|
||||
// an octal escape such as \123 would be scanned with scanDigits(ch, 8, 3)
|
||||
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
|
||||
for n > 0 && digitVal(ch) < base {
|
||||
ch = s.next()
|
||||
n--
|
||||
}
|
||||
if n > 0 {
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
|
||||
// we scanned all digits, put the last non digit char back
|
||||
s.unread()
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanIdentifier scans an identifier and returns the literal string
|
||||
func (s *Scanner) scanIdentifier() string {
|
||||
offs := s.srcPos.Offset - s.lastCharLen
|
||||
ch := s.next()
|
||||
for isLetter(ch) || isDigit(ch) || ch == '-' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread() // we got identifier, put back latest char
|
||||
}
|
||||
|
||||
return string(s.src[offs:s.srcPos.Offset])
|
||||
}
|
||||
|
||||
// recentPosition returns the position of the character immediately after the
|
||||
// character or token returned by the last call to Scan.
|
||||
func (s *Scanner) recentPosition() (pos token.Pos) {
|
||||
pos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
switch {
|
||||
case s.srcPos.Column > 0:
|
||||
// common case: last character was not a '\n'
|
||||
pos.Line = s.srcPos.Line
|
||||
pos.Column = s.srcPos.Column
|
||||
case s.lastLineLen > 0:
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
pos.Line = s.srcPos.Line - 1
|
||||
pos.Column = s.lastLineLen
|
||||
default:
|
||||
// at the beginning of the source
|
||||
pos.Line = 1
|
||||
pos.Column = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// err reports a scanning error to the s.Error function. If the function is
|
||||
// not defined, the error is printed to os.Stderr by default
|
||||
func (s *Scanner) err(msg string) {
|
||||
s.ErrorCount++
|
||||
pos := s.recentPosition()
|
||||
|
||||
if s.Error != nil {
|
||||
s.Error(pos, msg)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
// isLetter returns true if the given rune is a letter
|
||||
func isLetter(ch rune) bool {
|
||||
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
|
||||
}
|
||||
|
||||
// isDigit returns true if the given rune is a decimal digit
|
||||
func isDigit(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
|
||||
}
|
||||
|
||||
// isDecimal returns true if the given rune is a decimal digit
|
||||
func isDecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9'
|
||||
}
|
||||
|
||||
// isHexadecimal returns true if the given rune is a hexadecimal digit
|
||||
func isHexadecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
|
||||
}
|
||||
|
||||
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
|
||||
func isWhitespace(ch rune) bool {
|
||||
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
|
||||
}
|
||||
|
||||
// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
|
||||
func digitVal(ch rune) int {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9':
|
||||
return int(ch - '0')
|
||||
case 'a' <= ch && ch <= 'f':
|
||||
return int(ch - 'a' + 10)
|
||||
case 'A' <= ch && ch <= 'F':
|
||||
return int(ch - 'A' + 10)
|
||||
}
|
||||
return 16 // larger than any legal digit val
|
||||
}
|
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
package token
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Pos describes an arbitrary source position
|
||||
// including the file, line, and column location.
|
||||
// A Pos is valid if the line number is > 0.
|
||||
type Pos struct {
|
||||
Filename string // filename, if any
|
||||
Offset int // offset, starting at 0
|
||||
Line int // line number, starting at 1
|
||||
Column int // column number, starting at 1 (character count)
|
||||
}
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||
|
||||
// String returns a string in one of several forms:
|
||||
//
|
||||
// file:line:column valid position with file name
|
||||
// line:column valid position without file name
|
||||
// file invalid position with file name
|
||||
// - invalid position without file name
|
||||
func (p Pos) String() string {
|
||||
s := p.Filename
|
||||
if p.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Before reports whether the position p is before u.
|
||||
func (p Pos) Before(u Pos) bool {
|
||||
return u.Offset > p.Offset || u.Line > p.Line
|
||||
}
|
||||
|
||||
// After reports whether the position p is after u.
|
||||
func (p Pos) After(u Pos) bool {
|
||||
return u.Offset < p.Offset || u.Line < p.Line
|
||||
}
|
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
Normal file
|
@ -0,0 +1,118 @@
|
|||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
|
||||
type Token struct {
|
||||
Type Type
|
||||
Pos Pos
|
||||
Text string
|
||||
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
|
||||
type Type int
|
||||
|
||||
const (
|
||||
// Special tokens
|
||||
ILLEGAL Type = iota
|
||||
EOF
|
||||
|
||||
identifier_beg
|
||||
literal_beg
|
||||
NUMBER // 12345
|
||||
FLOAT // 123.45
|
||||
BOOL // true,false
|
||||
STRING // "abc"
|
||||
NULL // null
|
||||
literal_end
|
||||
identifier_end
|
||||
|
||||
operator_beg
|
||||
LBRACK // [
|
||||
LBRACE // {
|
||||
COMMA // ,
|
||||
PERIOD // .
|
||||
COLON // :
|
||||
|
||||
RBRACK // ]
|
||||
RBRACE // }
|
||||
|
||||
operator_end
|
||||
)
|
||||
|
||||
var tokens = [...]string{
|
||||
ILLEGAL: "ILLEGAL",
|
||||
|
||||
EOF: "EOF",
|
||||
|
||||
NUMBER: "NUMBER",
|
||||
FLOAT: "FLOAT",
|
||||
BOOL: "BOOL",
|
||||
STRING: "STRING",
|
||||
NULL: "NULL",
|
||||
|
||||
LBRACK: "LBRACK",
|
||||
LBRACE: "LBRACE",
|
||||
COMMA: "COMMA",
|
||||
PERIOD: "PERIOD",
|
||||
COLON: "COLON",
|
||||
|
||||
RBRACK: "RBRACK",
|
||||
RBRACE: "RBRACE",
|
||||
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns a formatted representation of the token consisting of its
|
||||
// position, type, and literal text. The literal text is only meaningful for
|
||||
// certain token types, such as token.IDENT, token.STRING, etc.
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// HCLToken converts this token to an HCL token.
|
||||
//
|
||||
// The token type must be a literal type or this will panic.
|
||||
func (t Token) HCLToken() hcltoken.Token {
|
||||
switch t.Type {
|
||||
case BOOL:
|
||||
return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
|
||||
case FLOAT:
|
||||
return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
|
||||
case NULL:
|
||||
return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
|
||||
case NUMBER:
|
||||
return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
|
||||
case STRING:
|
||||
return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
|
||||
}
|
||||
}
|
38
vendor/github.com/hashicorp/hcl/lex.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
package hcl
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type lexModeValue byte
|
||||
|
||||
const (
|
||||
lexModeUnknown lexModeValue = iota
|
||||
lexModeHcl
|
||||
lexModeJson
|
||||
)
|
||||
|
||||
// lexMode returns whether we're going to be parsing in JSON
|
||||
// mode or HCL mode.
|
||||
func lexMode(v []byte) lexModeValue {
|
||||
var (
|
||||
r rune
|
||||
w int
|
||||
offset int
|
||||
)
|
||||
|
||||
for {
|
||||
r, w = utf8.DecodeRune(v[offset:])
|
||||
offset += w
|
||||
if unicode.IsSpace(r) {
|
||||
continue
|
||||
}
|
||||
if r == '{' {
|
||||
return lexModeJson
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return lexModeHcl
|
||||
}
|
39
vendor/github.com/hashicorp/hcl/parse.go
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
package hcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
hclParser "github.com/hashicorp/hcl/hcl/parser"
|
||||
jsonParser "github.com/hashicorp/hcl/json/parser"
|
||||
)
|
||||
|
||||
// ParseBytes accepts a byte slice as input and returns the AST tree.
|
||||
//
|
||||
// Input can be either JSON or HCL
|
||||
func ParseBytes(in []byte) (*ast.File, error) {
|
||||
return parse(in)
|
||||
}
|
||||
|
||||
// ParseString accepts input as a string and returns the AST tree.
|
||||
func ParseString(input string) (*ast.File, error) {
|
||||
return parse([]byte(input))
|
||||
}
|
||||
|
||||
func parse(in []byte) (*ast.File, error) {
|
||||
switch lexMode(in) {
|
||||
case lexModeHcl:
|
||||
return hclParser.Parse(in)
|
||||
case lexModeJson:
|
||||
return jsonParser.Parse(in)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown config format")
|
||||
}
|
||||
|
||||
// Parse parses the given input and returns the root object.
|
||||
//
|
||||
// The input format can be either HCL or JSON.
|
||||
func Parse(input string) (*ast.File, error) {
|
||||
return parse([]byte(input))
|
||||
}
|
363
vendor/github.com/hashicorp/vault/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,363 @@
|
|||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
611
vendor/github.com/hashicorp/vault/api/SPEC.md
generated
vendored
Normal file
|
@ -0,0 +1,611 @@
|
|||
FORMAT: 1A
|
||||
|
||||
# vault
|
||||
|
||||
The Vault API gives you full access to the Vault project.
|
||||
|
||||
If you're browsing this API specification in GitHub or in raw
|
||||
format, please excuse some of the odd formatting. This document
|
||||
is in API Blueprint format, which is read by viewers such as
|
||||
Apiary.
|
||||
|
||||
## Sealed vs. Unsealed
|
||||
|
||||
Whenever an individual Vault server is started, it is started
|
||||
in the _sealed_ state. In this state, it knows where its data
|
||||
is located, but the data is encrypted and Vault doesn't have the
|
||||
encryption keys to access it. Before Vault can operate, it must
|
||||
be _unsealed_.
|
||||
|
||||
**Note:** Sealing/unsealing has no relationship to _authentication_
|
||||
which is separate and still required once the Vault is unsealed.
|
||||
|
||||
Instead of being sealed with a single key, we utilize
|
||||
[Shamir's Secret Sharing](http://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing)
|
||||
to shard a key into _n_ parts such that _t_ parts are required
|
||||
to reconstruct the original key, where `t <= n`. This means that
|
||||
Vault itself doesn't know the original key, and no single person
|
||||
has the original key (unless `n = 1`, or `t` parts are given to
|
||||
a single person).
|
||||
|
||||
Unsealing is done via an unauthenticated
|
||||
[unseal API](#reference/seal/unseal/unseal). This API takes a single
|
||||
master shard and progresses the unsealing process. Once all shards
|
||||
are given, the Vault is either unsealed or resets the unsealing
|
||||
process if the key was invalid.
|
||||
|
||||
The entire seal/unseal state is server-wide. This allows multiple
|
||||
distinct operators to use the unseal API (or more likely the
|
||||
`vault unseal` command) from separate computers/networks and never
|
||||
have to transmit their key in order to unseal the vault in a
|
||||
distributed fashion.
|
||||
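The unseal flow described above can be driven with nothing more than plain HTTP against the unseal endpoint documented later in this spec (`PUT /sys/unseal`). The sketch below, in Go, feeds placeholder key shards until the vault reports it is unsealed; the server address and the shard values are assumptions for illustration only and are not part of the vendored code.

// Minimal sketch: submit unseal key shards one by one via PUT /v1/sys/unseal.
// The address and the shard strings are placeholders.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type unsealStatus struct {
	Sealed   bool `json:"sealed"`
	T        int  `json:"t"`
	N        int  `json:"n"`
	Progress int  `json:"progress"`
}

func main() {
	shards := []string{"shard-one", "shard-two", "shard-three"} // placeholder key shards
	for _, shard := range shards {
		body, _ := json.Marshal(map[string]string{"key": shard})
		req, err := http.NewRequest("PUT", "http://127.0.0.1:8200/v1/sys/unseal", bytes.NewReader(body))
		if err != nil {
			log.Fatal(err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			log.Fatal(err)
		}
		var status unsealStatus
		if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()
		fmt.Printf("progress %d/%d, sealed=%v\n", status.Progress, status.T, status.Sealed)
		if !status.Sealed {
			break // enough shards have been entered; the vault is unsealed
		}
	}
}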
|
||||
## Transport
|
||||
|
||||
The API is expected to be accessed over a TLS connection at
|
||||
all times, with a valid certificate that is verified by a well
|
||||
behaved client.
|
||||
|
||||
## Authentication
|
||||
|
||||
Once the Vault is unsealed, every other operation requires
|
||||
authentication. There are multiple methods for authentication
|
||||
that can be enabled (see
|
||||
[authentication](#reference/authentication)).
|
||||
|
||||
Authentication is done with the login endpoint. The login endpoint
|
||||
returns an access token that is set as the `X-Vault-Token` header.
|
||||
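As a rough sketch of how this looks with the vendored Go client further down in this commit: `NewClient` picks up `VAULT_ADDR`/`VAULT_TOKEN` from the environment, `SetToken` attaches the token that every subsequent request sends as the `X-Vault-Token` header, and `Logical().Read` performs an authenticated read. The token value and the `secret/hello` path are placeholders.

// Minimal sketch using the vendored client: authenticate by setting a token.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// nil config: DefaultConfig() plus VAULT_ADDR / VAULT_TOKEN from the environment
	client, err := api.NewClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("s.placeholder-token") // token obtained from a login endpoint

	secret, err := client.Logical().Read("secret/hello")
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data)
	}
}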
|
||||
## Help
|
||||
|
||||
To retrieve the help for any API within Vault, including mounted
|
||||
backends, credential providers, etc., append `?help=1` to any
|
||||
URL. If you have valid permission to access the path, then the help text
|
||||
will be returned with the following structure:
|
||||
|
||||
{
|
||||
"help": "help text"
|
||||
}
|
||||
|
||||
## Error Response
|
||||
|
||||
A common JSON structure is always returned for errors:
|
||||
|
||||
{
|
||||
"errors": [
|
||||
"message",
|
||||
"another message"
|
||||
]
|
||||
}
|
||||
|
||||
This structure will be sent down for any non-20x HTTP status.
|
||||
|
||||
## HTTP Status Codes
|
||||
|
||||
The following HTTP status codes are used throughout the API.
|
||||
|
||||
- `200` - Success with data.
|
||||
- `204` - Success, no data returned.
|
||||
- `400` - Invalid request, missing or invalid data.
|
||||
- `403` - Forbidden, your authentication details are either
|
||||
incorrect or you don't have access to this feature.
|
||||
- `404` - Invalid path. This can mean either that the path truly
|
||||
doesn't exist or that you don't have permission to view a
|
||||
specific path. We use 404 in some cases to avoid state leakage.
|
||||
- `429` - Rate limit exceeded. Try again after waiting some period
|
||||
of time.
|
||||
- `500` - Internal server error. An internal error has occurred,
|
||||
try again later. If the error persists, report a bug.
|
||||
- `503` - Vault is down for maintenance or is currently sealed.
|
||||
Try again later.
|
||||
|
||||
# Group Initialization
|
||||
|
||||
## Initialization [/sys/init]
|
||||
### Initialization Status [GET]
|
||||
Returns the status of whether the vault is initialized or not. The
|
||||
vault doesn't have to be unsealed for this operation.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"initialized": true
|
||||
}
|
||||
|
||||
### Initialize [POST]
|
||||
Initialize the vault. This is an unauthenticated request to initially
|
||||
set up a new vault. Although this is unauthenticated, it is still safe:
|
||||
data cannot be in vault prior to initialization, and any future
|
||||
authentication will fail if you didn't initialize it yourself.
|
||||
Additionally, once initialized, a vault cannot be reinitialized.
|
||||
|
||||
This API is the only time Vault will ever be aware of your keys, and
|
||||
the only time the keys will ever be returned in one unit. Care should
|
||||
be taken to ensure that the output of this request is never logged,
|
||||
and that the keys are properly distributed.
|
||||
|
||||
The response also contains the initial root token that can be used
|
||||
as authentication in order to initially configure Vault once it is
|
||||
unsealed. Just as with the unseal keys, this is the only time Vault is
|
||||
ever aware of this token.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"secret_shares": 5,
|
||||
"secret_threshold": 3,
|
||||
}
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"keys": ["one", "two", "three"],
|
||||
"root_token": "foo"
|
||||
}
|
||||
|
||||
# Group Seal/Unseal
|
||||
|
||||
## Seal Status [/sys/seal-status]
|
||||
### Seal Status [GET]
|
||||
Returns the status of whether the vault is currently
|
||||
sealed or not, as well as the progress of unsealing.
|
||||
|
||||
The response has the following attributes:
|
||||
|
||||
- sealed (boolean) - If true, the vault is sealed. Otherwise,
|
||||
it is unsealed.
|
||||
- t (int) - The "t" value for the master key, or the number
|
||||
of shards needed total to unseal the vault.
|
||||
- n (int) - The "n" value for the master key, or the total
|
||||
number of shards of the key distributed.
|
||||
- progress (int) - The number of master key shards that have
|
||||
been entered so far towards unsealing the vault.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"sealed": true,
|
||||
"t": 3,
|
||||
"n": 5,
|
||||
"progress": 1
|
||||
}
|
||||
|
||||
## Seal [/sys/seal]
|
||||
### Seal [PUT]
|
||||
Seal the vault.
|
||||
|
||||
Sealing the vault locks Vault from any future operations on any
|
||||
secrets or system configuration until the vault is once again
|
||||
unsealed. Internally, sealing throws away the keys to access the
|
||||
encrypted vault data, so Vault is unable to access the data without
|
||||
unsealing to get the encryption keys.
|
||||
|
||||
+ Response 204
|
||||
|
||||
## Unseal [/sys/unseal]
|
||||
### Unseal [PUT]
|
||||
Unseal the vault.
|
||||
|
||||
Unseal the vault by entering a portion of the master key. The
|
||||
response object will tell you if the unseal is complete or
|
||||
only partial.
|
||||
|
||||
If the vault is already unsealed, this does nothing. It is
|
||||
not an error; the return value just says the vault is unsealed.
|
||||
Due to the architecture of Vault, we cannot validate whether
|
||||
any portion of the unseal key given is valid until all keys
|
||||
are entered; therefore, unsealing an already unsealed vault
|
||||
is still a success even if the input key is invalid.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"key": "value"
|
||||
}
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"sealed": true,
|
||||
"t": 3,
|
||||
"n": 5,
|
||||
"progress": 1
|
||||
}
|
||||
|
||||
# Group Authentication
|
||||
|
||||
## List Auth Methods [/sys/auth]
|
||||
### List all auth methods [GET]
|
||||
Lists all available authentication methods.
|
||||
|
||||
This returns the name of the authentication method as well as
|
||||
a human-friendly long-form help text for the method that can be
|
||||
shown to the user as documentation.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"token": {
|
||||
"type": "token",
|
||||
"description": "Token authentication"
|
||||
},
|
||||
"oauth": {
|
||||
"type": "oauth",
|
||||
"description": "OAuth authentication"
|
||||
}
|
||||
}
|
||||
|
||||
## Single Auth Method [/sys/auth/{id}]
|
||||
|
||||
+ Parameters
|
||||
+ id (required, string) ... The ID of the auth method.
|
||||
|
||||
### Enable an auth method [PUT]
|
||||
Enables an authentication method.
|
||||
|
||||
The body of the request depends on the authentication method
|
||||
being used. Please reference the documentation for the specific
|
||||
authentication method you're enabling in order to determine what
|
||||
parameters you must give it.
|
||||
|
||||
If an authentication method is already enabled, then this can be
|
||||
used to change the configuration, including even the type of
|
||||
the configuration.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"type": "type",
|
||||
"key": "value",
|
||||
"key2": "value2"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
### Disable an auth method [DELETE]
|
||||
Disables an authentication method. Previously authenticated sessions
|
||||
are immediately invalidated.
|
||||
|
||||
+ Response 204
|
||||
|
||||
# Group Policies
|
||||
|
||||
Policies are named permission sets that identities returned by
|
||||
credential stores are bound to. This separates _authentication_
|
||||
from _authorization_.
|
||||
|
||||
## Policies [/sys/policy]
|
||||
### List all Policies [GET]
|
||||
|
||||
List all the policies.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"policies": ["root"]
|
||||
}
|
||||
|
||||
## Single Policy [/sys/policy/{id}]
|
||||
|
||||
+ Parameters
|
||||
+ id (required, string) ... The name of the policy
|
||||
|
||||
### Upsert [PUT]
|
||||
|
||||
Create or update a policy with the given ID.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"rules": "HCL"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
### Delete [DELETE]
|
||||
|
||||
Delete a policy with the given ID. Any identities bound to this
|
||||
policy will immediately become "deny all" despite already being
|
||||
authenticated.
|
||||
|
||||
+ Response 204
|
||||
|
||||
# Group Mounts
|
||||
|
||||
Logical backends are mounted at _mount points_, similar to
|
||||
filesystems. This allows you to mount the "aws" logical backend
|
||||
at the "aws-us-east" path, so all access is at `/aws-us-east/keys/foo`
|
||||
for example. This makes it possible to enable multiple logical backends at once.
|
||||
|
||||
## Mounts [/sys/mounts]
|
||||
### List all mounts [GET]
|
||||
|
||||
Lists all the active mount points.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"aws": {
|
||||
"type": "aws",
|
||||
"description": "AWS"
|
||||
},
|
||||
"pg": {
|
||||
"type": "postgresql",
|
||||
"description": "PostgreSQL dynamic users"
|
||||
}
|
||||
}
|
||||
|
||||
## Single Mount [/sys/mounts/{path}]
|
||||
### New Mount [POST]
|
||||
|
||||
Mount a logical backend to a new path.
|
||||
|
||||
Configuration for this new backend is done via the normal
|
||||
read/write mechanism once it is mounted.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"type": "aws",
|
||||
"description": "EU AWS tokens"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
### Unmount [DELETE]
|
||||
|
||||
Unmount a mount point.
|
||||
|
||||
+ Response 204
|
||||
|
||||
## Remount [/sys/remount]
|
||||
### Remount [POST]
|
||||
|
||||
Move an already-mounted backend to a new path.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"from": "aws",
|
||||
"to": "aws-east"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
# Group Audit Backends
|
||||
|
||||
Audit backends are responsible for shuttling the audit logs that
|
||||
Vault generates to a durable system for future querying. By default,
|
||||
audit logs are not stored anywhere.
|
||||
|
||||
## Audit Backends [/sys/audit]
|
||||
### List Enabled Audit Backends [GET]
|
||||
|
||||
List all the enabled audit backends
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"file": {
|
||||
"type": "file",
|
||||
"description": "Send audit logs to a file",
|
||||
"options": {}
|
||||
}
|
||||
}
|
||||
|
||||
## Single Audit Backend [/sys/audit/{path}]
|
||||
|
||||
+ Parameters
|
||||
+ path (required, string) ... The path where the audit backend is mounted
|
||||
|
||||
### Enable [PUT]
|
||||
|
||||
Enable an audit backend.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"type": "file",
|
||||
"description": "send to a file",
|
||||
"options": {
|
||||
"path": "/var/log/vault.audit.log"
|
||||
}
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
### Disable [DELETE]
|
||||
|
||||
Disable an audit backend.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
+ Response 204
|
||||
|
||||
# Group Secrets
|
||||
|
||||
## Generic [/{mount}/{path}]
|
||||
|
||||
This group documents the general format of reading and writing
|
||||
to Vault. The exact structure of the keyspace is defined by the
|
||||
logical backends in use, so documentation related to
|
||||
a specific backend should be referenced for details on what keys
|
||||
and routes are expected.
|
||||
|
||||
The path used in the examples is `/prefix/path`, but in practice
|
||||
these will be defined by the backends that are mounted. For
|
||||
example, reading an AWS key might be at the `/aws/root` path.
|
||||
These paths are defined by the logical backends.
|
||||
|
||||
+ Parameters
|
||||
+ mount (required, string) ... The mount point for the
|
||||
logical backend. Example: `aws`.
|
||||
+ path (optional, string) ... The path within the backend
|
||||
to read or write data.
|
||||
|
||||
### Read [GET]
|
||||
|
||||
Read data from vault.
|
||||
|
||||
The data read from the vault can either be a secret or
|
||||
arbitrary configuration data. The type of data returned
|
||||
depends on the path, and is defined by the logical backend.
|
||||
|
||||
If the return value is a secret, then the return structure
|
||||
is a mixture of arbitrary key/value along with the following
|
||||
fields which are guaranteed to exist:
|
||||
|
||||
- `lease_id` (string) - A unique ID used for renewal and
|
||||
revocation.
|
||||
|
||||
- `renewable` (bool) - If true, then this key can be renewed.
|
||||
If a key can't be renewed, then a new key must be requested
|
||||
after the lease duration period.
|
||||
|
||||
- `lease_duration` (int) - The time in seconds that a secret is
|
||||
valid for before it must be renewed.
|
||||
|
||||
- `lease_duration_max` (int) - The maximum amount of time in
|
||||
seconds that a secret is valid for. This will always be
|
||||
greater than or equal to `lease_duration`. The difference
|
||||
between this and `lease_duration` is an overlap window
|
||||
where multiple keys may be valid.
|
||||
|
||||
If the return value is not a secret, then the return structure
|
||||
is an arbitrary JSON object.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"lease_id": "UUID",
|
||||
"lease_duration": 3600,
|
||||
"key": "value"
|
||||
}
|
||||
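A hedged sketch of consuming these guaranteed fields with the vendored client: the `Secret` struct in `secret.go` exposes `LeaseID`, `LeaseDuration` and `Renewable` directly. The `aws/keys/web-deploy` path mirrors the AWS backend example later in this spec and is illustrative only.

// Minimal sketch: read a dynamic secret and inspect its lease fields.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(nil)
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Read("aws/keys/web-deploy")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no secret at path")
	}

	fmt.Println("lease_id:      ", secret.LeaseID)
	fmt.Println("lease_duration:", secret.LeaseDuration) // seconds until renewal is required
	fmt.Println("renewable:     ", secret.Renewable)
	fmt.Println("data:          ", secret.Data) // backend-specific key/value payload
}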
|
||||
### Write [PUT]
|
||||
|
||||
Write data to vault.
|
||||
|
||||
The behavior and arguments to the write are defined by
|
||||
the logical backend.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"key": "value"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
# Group Lease Management
|
||||
|
||||
## Renew Key [/sys/renew/{id}]
|
||||
|
||||
+ Parameters
|
||||
+ id (required, string) ... The `lease_id` of the secret
|
||||
to renew.
|
||||
|
||||
### Renew [PUT]
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"lease_id": "...",
|
||||
"lease_duration": 3600,
|
||||
"access_key": "foo",
|
||||
"secret_key": "bar"
|
||||
}
|
||||
|
||||
## Revoke Key [/sys/revoke/{id}]
|
||||
|
||||
+ Parameters
|
||||
+ id (required, string) ... The `lease_id` of the secret
|
||||
to revoke.
|
||||
|
||||
### Revoke [PUT]
|
||||
|
||||
+ Response 204
|
||||
|
||||
# Group Backend: AWS
|
||||
|
||||
## Root Key [/aws/root]
|
||||
### Set the Key [PUT]
|
||||
|
||||
Set the root key that the logical backend will use to create
|
||||
new secrets, IAM policies, etc.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"access_key": "key",
|
||||
"secret_key": "key",
|
||||
"region": "us-east-1"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
## Policies [/aws/policies]
|
||||
### List Policies [GET]
|
||||
|
||||
List all the policies that can be used to create keys.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
[{
|
||||
"name": "root",
|
||||
"description": "Root access"
|
||||
}, {
|
||||
"name": "web-deploy",
|
||||
"description": "Enough permissions to deploy the web app."
|
||||
}]
|
||||
|
||||
## Single Policy [/aws/policies/{name}]
|
||||
|
||||
+ Parameters
|
||||
+ name (required, string) ... Name of the policy.
|
||||
|
||||
### Read [GET]
|
||||
|
||||
Read a policy.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"policy": "base64-encoded policy"
|
||||
}
|
||||
|
||||
### Upsert [PUT]
|
||||
|
||||
Create or update a policy.
|
||||
|
||||
+ Request (application/json)
|
||||
|
||||
{
|
||||
"policy": "base64-encoded policy"
|
||||
}
|
||||
|
||||
+ Response 204
|
||||
|
||||
### Delete [DELETE]
|
||||
|
||||
Delete the policy with the given name.
|
||||
|
||||
+ Response 204
|
||||
|
||||
## Generate Access Keys [/aws/keys/{policy}]
|
||||
### Create [GET]
|
||||
|
||||
This generates a new keypair for the given policy.
|
||||
|
||||
+ Parameters
|
||||
+ policy (required, string) ... The policy under which to create
|
||||
the key pair.
|
||||
|
||||
+ Response 200 (application/json)
|
||||
|
||||
{
|
||||
"lease_id": "...",
|
||||
"lease_duration": 3600,
|
||||
"access_key": "foo",
|
||||
"secret_key": "bar"
|
||||
}
|
11
vendor/github.com/hashicorp/vault/api/auth.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
package api
|
||||
|
||||
// Auth is used to perform credential backend related operations.
|
||||
type Auth struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Auth is used to return the client for credential-backend API calls.
|
||||
func (c *Client) Auth() *Auth {
|
||||
return &Auth{c: c}
|
||||
}
|
223
vendor/github.com/hashicorp/vault/api/auth_token.go
generated
vendored
Normal file
|
@ -0,0 +1,223 @@
|
|||
package api
|
||||
|
||||
// TokenAuth is used to perform token backend operations on Vault
|
||||
type TokenAuth struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Token is used to return the client for token-backend API calls
|
||||
func (a *Auth) Token() *TokenAuth {
|
||||
return &TokenAuth{c: a.c}
|
||||
}
|
||||
|
||||
func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) {
|
||||
r := c.c.NewRequest("POST", "/v1/auth/token/create")
|
||||
if err := r.SetJSONBody(opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) {
|
||||
r := c.c.NewRequest("POST", "/v1/auth/token/create-orphan")
|
||||
if err := r.SetJSONBody(opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) {
|
||||
r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName)
|
||||
if err := r.SetJSONBody(opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) Lookup(token string) (*Secret, error) {
|
||||
r := c.c.NewRequest("POST", "/v1/auth/token/lookup")
|
||||
if err := r.SetJSONBody(map[string]interface{}{
|
||||
"token": token,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) {
|
||||
r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor")
|
||||
if err := r.SetJSONBody(map[string]interface{}{
|
||||
"accessor": accessor,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) LookupSelf() (*Secret, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self")
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/auth/token/renew")
|
||||
if err := r.SetJSONBody(map[string]interface{}{
|
||||
"token": token,
|
||||
"increment": increment,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self")
|
||||
|
||||
body := map[string]interface{}{"increment": increment}
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
// RevokeAccessor revokes a token associated with the given accessor
|
||||
// along with all the child tokens.
|
||||
func (c *TokenAuth) RevokeAccessor(accessor string) error {
|
||||
r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor")
|
||||
if err := r.SetJSONBody(map[string]interface{}{
|
||||
"accessor": accessor,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RevokeOrphan revokes a token without revoking the tree underneath it (so
|
||||
// child tokens are orphaned rather than revoked)
|
||||
func (c *TokenAuth) RevokeOrphan(token string) error {
|
||||
r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan")
|
||||
if err := r.SetJSONBody(map[string]interface{}{
|
||||
"token": token,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RevokeSelf revokes the token making the call. The `token` parameter is kept
|
||||
// for backwards compatibility but is ignored; only the client's set token has
|
||||
// an effect.
|
||||
func (c *TokenAuth) RevokeSelf(token string) error {
|
||||
r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RevokeTree is the "normal" revoke operation that revokes the given token and
|
||||
// the entire tree underneath -- all of its child tokens, their child tokens,
|
||||
// etc.
|
||||
func (c *TokenAuth) RevokeTree(token string) error {
|
||||
r := c.c.NewRequest("PUT", "/v1/auth/token/revoke")
|
||||
if err := r.SetJSONBody(map[string]interface{}{
|
||||
"token": token,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TokenCreateRequest is the options structure for creating a token.
|
||||
type TokenCreateRequest struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
Policies []string `json:"policies,omitempty"`
|
||||
Metadata map[string]string `json:"meta,omitempty"`
|
||||
Lease string `json:"lease,omitempty"`
|
||||
TTL string `json:"ttl,omitempty"`
|
||||
ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"`
|
||||
Period string `json:"period,omitempty"`
|
||||
NoParent bool `json:"no_parent,omitempty"`
|
||||
NoDefaultPolicy bool `json:"no_default_policy,omitempty"`
|
||||
DisplayName string `json:"display_name"`
|
||||
NumUses int `json:"num_uses"`
|
||||
Renewable *bool `json:"renewable,omitempty"`
|
||||
}
|
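A minimal usage sketch for the token helpers above, assuming an already-configured client: create a child token bound to a hypothetical "web-deploy" policy with a one-hour TTL, then look it up. Policy name and TTL are placeholders.

// Minimal sketch for the TokenAuth helpers: create a token, then inspect it.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Create a token restricted to a (hypothetical) "web-deploy" policy.
	created, err := client.Auth().Token().Create(&api.TokenCreateRequest{
		Policies: []string{"web-deploy"},
		TTL:      "1h",
	})
	if err != nil {
		log.Fatal(err)
	}
	if created == nil || created.Auth == nil {
		log.Fatal("no auth information returned")
	}
	token := created.Auth.ClientToken

	// Inspect the token that was just issued.
	info, err := client.Auth().Token().Lookup(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.Data)
}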
432
vendor/github.com/hashicorp/vault/api/client.go
generated
vendored
Normal file
|
@ -0,0 +1,432 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"path"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-rootcerts"
|
||||
"github.com/sethgrid/pester"
|
||||
)
|
||||
|
||||
const EnvVaultAddress = "VAULT_ADDR"
|
||||
const EnvVaultCACert = "VAULT_CACERT"
|
||||
const EnvVaultCAPath = "VAULT_CAPATH"
|
||||
const EnvVaultClientCert = "VAULT_CLIENT_CERT"
|
||||
const EnvVaultClientKey = "VAULT_CLIENT_KEY"
|
||||
const EnvVaultInsecure = "VAULT_SKIP_VERIFY"
|
||||
const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
|
||||
const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
|
||||
const EnvVaultMaxRetries = "VAULT_MAX_RETRIES"
|
||||
const EnvVaultToken = "VAULT_TOKEN"
|
||||
|
||||
// WrappingLookupFunc is a function that, given an HTTP verb and a path,
|
||||
// returns an optional string duration to be used for response wrapping (e.g.
|
||||
// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/",
|
||||
// however, end-of-path forward slashes are not trimmed, so must match your
|
||||
// called path precisely.
|
||||
type WrappingLookupFunc func(operation, path string) string
|
||||
|
||||
// Config is used to configure the creation of the client.
|
||||
type Config struct {
|
||||
// Address is the address of the Vault server. This should be a complete
|
||||
// URL such as "http://vault.example.com". If you need a custom SSL
|
||||
// cert or want to enable insecure mode, you need to specify a custom
|
||||
// HttpClient.
|
||||
Address string
|
||||
|
||||
// HttpClient is the HTTP client to use, which will currently always have the
|
||||
// same values as http.DefaultClient. This is used to control redirect behavior.
|
||||
HttpClient *http.Client
|
||||
|
||||
redirectSetup sync.Once
|
||||
|
||||
// MaxRetries controls the maximum number of times to retry when a 5xx error
|
||||
// occurs. Set to 0 or less to disable retrying. Defaults to 0.
|
||||
MaxRetries int
|
||||
}
|
||||
|
||||
// TLSConfig contains the parameters needed to configure TLS on the HTTP client
|
||||
// used to communicate with Vault.
|
||||
type TLSConfig struct {
|
||||
// CACert is the path to a PEM-encoded CA cert file to use to verify the
|
||||
// Vault server SSL certificate.
|
||||
CACert string
|
||||
|
||||
// CAPath is the path to a directory of PEM-encoded CA cert files to verify
|
||||
// the Vault server SSL certificate.
|
||||
CAPath string
|
||||
|
||||
// ClientCert is the path to the certificate for Vault communication
|
||||
ClientCert string
|
||||
|
||||
// ClientKey is the path to the private key for Vault communication
|
||||
ClientKey string
|
||||
|
||||
// TLSServerName, if set, is used to set the SNI host when connecting via
|
||||
// TLS.
|
||||
TLSServerName string
|
||||
|
||||
// Insecure enables or disables SSL verification
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for the client. It is
|
||||
// safe to modify the return value of this function.
|
||||
//
|
||||
// The default Address is https://127.0.0.1:8200, but this can be overridden by
|
||||
// setting the `VAULT_ADDR` environment variable.
|
||||
func DefaultConfig() *Config {
|
||||
config := &Config{
|
||||
Address: "https://127.0.0.1:8200",
|
||||
HttpClient: cleanhttp.DefaultClient(),
|
||||
}
|
||||
config.HttpClient.Timeout = time.Second * 60
|
||||
transport := config.HttpClient.Transport.(*http.Transport)
|
||||
transport.TLSHandshakeTimeout = 10 * time.Second
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
|
||||
if v := os.Getenv(EnvVaultAddress); v != "" {
|
||||
config.Address = v
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// ConfigureTLS takes a set of TLS configurations and applies those to the HTTP client.
|
||||
func (c *Config) ConfigureTLS(t *TLSConfig) error {
|
||||
if c.HttpClient == nil {
|
||||
c.HttpClient = DefaultConfig().HttpClient
|
||||
}
|
||||
|
||||
var clientCert tls.Certificate
|
||||
foundClientCert := false
|
||||
if t.CACert != "" || t.CAPath != "" || t.ClientCert != "" || t.ClientKey != "" || t.Insecure {
|
||||
if t.ClientCert != "" && t.ClientKey != "" {
|
||||
var err error
|
||||
clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
foundClientCert = true
|
||||
} else if t.ClientCert != "" || t.ClientKey != "" {
|
||||
return fmt.Errorf("Both client cert and client key must be provided")
|
||||
}
|
||||
}
|
||||
|
||||
clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
|
||||
rootConfig := &rootcerts.Config{
|
||||
CAFile: t.CACert,
|
||||
CAPath: t.CAPath,
|
||||
}
|
||||
if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientTLSConfig.InsecureSkipVerify = t.Insecure
|
||||
|
||||
if foundClientCert {
|
||||
clientTLSConfig.Certificates = []tls.Certificate{clientCert}
|
||||
}
|
||||
if t.TLSServerName != "" {
|
||||
clientTLSConfig.ServerName = t.TLSServerName
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadEnvironment reads configuration information from the
|
||||
// environment. If there is an error, no configuration value
|
||||
// is updated.
|
||||
func (c *Config) ReadEnvironment() error {
|
||||
var envAddress string
|
||||
var envCACert string
|
||||
var envCAPath string
|
||||
var envClientCert string
|
||||
var envClientKey string
|
||||
var envInsecure bool
|
||||
var envTLSServerName string
|
||||
var envMaxRetries *uint64
|
||||
|
||||
// Parse the environment variables
|
||||
if v := os.Getenv(EnvVaultAddress); v != "" {
|
||||
envAddress = v
|
||||
}
|
||||
if v := os.Getenv(EnvVaultMaxRetries); v != "" {
|
||||
maxRetries, err := strconv.ParseUint(v, 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
envMaxRetries = &maxRetries
|
||||
}
|
||||
if v := os.Getenv(EnvVaultCACert); v != "" {
|
||||
envCACert = v
|
||||
}
|
||||
if v := os.Getenv(EnvVaultCAPath); v != "" {
|
||||
envCAPath = v
|
||||
}
|
||||
if v := os.Getenv(EnvVaultClientCert); v != "" {
|
||||
envClientCert = v
|
||||
}
|
||||
if v := os.Getenv(EnvVaultClientKey); v != "" {
|
||||
envClientKey = v
|
||||
}
|
||||
if v := os.Getenv(EnvVaultInsecure); v != "" {
|
||||
var err error
|
||||
envInsecure, err = strconv.ParseBool(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY")
|
||||
}
|
||||
}
|
||||
if v := os.Getenv(EnvVaultTLSServerName); v != "" {
|
||||
envTLSServerName = v
|
||||
}
|
||||
|
||||
// Configure the HTTP clients TLS configuration.
|
||||
t := &TLSConfig{
|
||||
CACert: envCACert,
|
||||
CAPath: envCAPath,
|
||||
ClientCert: envClientCert,
|
||||
ClientKey: envClientKey,
|
||||
TLSServerName: envTLSServerName,
|
||||
Insecure: envInsecure,
|
||||
}
|
||||
if err := c.ConfigureTLS(t); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if envAddress != "" {
|
||||
c.Address = envAddress
|
||||
}
|
||||
|
||||
if envMaxRetries != nil {
|
||||
c.MaxRetries = int(*envMaxRetries) + 1
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Client is the client to the Vault API. Create a client with
|
||||
// NewClient.
|
||||
type Client struct {
|
||||
addr *url.URL
|
||||
config *Config
|
||||
token string
|
||||
wrappingLookupFunc WrappingLookupFunc
|
||||
}
|
||||
|
||||
// NewClient returns a new client for the given configuration.
|
||||
//
|
||||
// If the environment variable `VAULT_TOKEN` is present, the token will be
|
||||
// automatically added to the client. Otherwise, you must manually call
|
||||
// `SetToken()`.
|
||||
func NewClient(c *Config) (*Client, error) {
|
||||
if c == nil {
|
||||
c = DefaultConfig()
|
||||
if err := c.ReadEnvironment(); err != nil {
|
||||
return nil, fmt.Errorf("error reading environment: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
u, err := url.Parse(c.Address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c.HttpClient == nil {
|
||||
c.HttpClient = DefaultConfig().HttpClient
|
||||
}
|
||||
|
||||
tp := c.HttpClient.Transport.(*http.Transport)
|
||||
if err := http2.ConfigureTransport(tp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
redirFunc := func() {
|
||||
// Ensure redirects are not automatically followed
|
||||
// Note that this is sane for the API client as it has its own
|
||||
// redirect handling logic (and thus also for command/meta),
|
||||
// but in e.g. http_test actual redirect handling is necessary
|
||||
c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
// Returning this value causes the Go net library to not close the
|
||||
// response body and to nil out the error. Otherwise pester tries
|
||||
// three times on every redirect because it sees an error from this
|
||||
// function (to prevent redirects) passing through to it.
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
||||
|
||||
c.redirectSetup.Do(redirFunc)
|
||||
|
||||
client := &Client{
|
||||
addr: u,
|
||||
config: c,
|
||||
}
|
||||
|
||||
if token := os.Getenv(EnvVaultToken); token != "" {
|
||||
client.SetToken(token)
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Sets the address of Vault in the client. The format of address should be
|
||||
// "<Scheme>://<Host>:<Port>". Setting this on a client will override the
|
||||
// value of VAULT_ADDR environment variable.
|
||||
func (c *Client) SetAddress(addr string) error {
|
||||
var err error
|
||||
if c.addr, err = url.Parse(addr); err != nil {
|
||||
return fmt.Errorf("failed to set address: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Address returns the Vault URL the client is configured to connect to
|
||||
func (c *Client) Address() string {
|
||||
return c.addr.String()
|
||||
}
|
||||
|
||||
// SetMaxRetries sets the number of retries that will be used in the case of certain errors
|
||||
func (c *Client) SetMaxRetries(retries int) {
|
||||
c.config.MaxRetries = retries
|
||||
}
|
||||
|
||||
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
|
||||
// for a given operation and path
|
||||
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
|
||||
c.wrappingLookupFunc = lookupFunc
|
||||
}
|
||||
|
||||
// Token returns the access token being used by this client. It will
|
||||
// return the empty string if there is no token set.
|
||||
func (c *Client) Token() string {
|
||||
return c.token
|
||||
}
|
||||
|
||||
// SetToken sets the token directly. This won't perform any auth
|
||||
// verification, it simply sets the token properly for future requests.
|
||||
func (c *Client) SetToken(v string) {
|
||||
c.token = v
|
||||
}
|
||||
|
||||
// ClearToken deletes the token if it is set or does nothing otherwise.
|
||||
func (c *Client) ClearToken() {
|
||||
c.token = ""
|
||||
}
|
||||
|
||||
// NewRequest creates a new raw request object to query the Vault server
|
||||
// configured for this client. This is an advanced method and generally
|
||||
// doesn't need to be called externally.
|
||||
func (c *Client) NewRequest(method, requestPath string) *Request {
|
||||
req := &Request{
|
||||
Method: method,
|
||||
URL: &url.URL{
|
||||
User: c.addr.User,
|
||||
Scheme: c.addr.Scheme,
|
||||
Host: c.addr.Host,
|
||||
Path: path.Join(c.addr.Path, requestPath),
|
||||
},
|
||||
ClientToken: c.token,
|
||||
Params: make(map[string][]string),
|
||||
}
|
||||
|
||||
var lookupPath string
|
||||
switch {
|
||||
case strings.HasPrefix(requestPath, "/v1/"):
|
||||
lookupPath = strings.TrimPrefix(requestPath, "/v1/")
|
||||
case strings.HasPrefix(requestPath, "v1/"):
|
||||
lookupPath = strings.TrimPrefix(requestPath, "v1/")
|
||||
default:
|
||||
lookupPath = requestPath
|
||||
}
|
||||
if c.wrappingLookupFunc != nil {
|
||||
req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
|
||||
} else {
|
||||
req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
// RawRequest performs the raw request given. This request may be against
|
||||
// a Vault server not configured with this client. This is an advanced operation
|
||||
// that generally won't need to be called externally.
|
||||
func (c *Client) RawRequest(r *Request) (*Response, error) {
|
||||
redirectCount := 0
|
||||
START:
|
||||
req, err := r.ToHTTP()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := pester.NewExtendedClient(c.config.HttpClient)
|
||||
client.Backoff = pester.LinearJitterBackoff
|
||||
client.MaxRetries = c.config.MaxRetries
|
||||
|
||||
var result *Response
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
result = &Response{Response: resp}
|
||||
}
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "tls: oversized") {
|
||||
err = fmt.Errorf(
|
||||
"%s\n\n"+
|
||||
"This error usually means that the server is running with TLS disabled\n"+
|
||||
"but the client is configured to use TLS. Please either enable TLS\n"+
|
||||
"on the server or run the client with -address set to an address\n"+
|
||||
"that uses the http protocol:\n\n"+
|
||||
" vault <command> -address http://<address>\n\n"+
|
||||
"You can also set the VAULT_ADDR environment variable:\n\n\n"+
|
||||
" VAULT_ADDR=http://<address> vault <command>\n\n"+
|
||||
"where <address> is replaced by the actual address to the server.",
|
||||
err)
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Check for a redirect, only allowing for a single redirect
|
||||
if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 {
|
||||
// Parse the updated location
|
||||
respLoc, err := resp.Location()
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Ensure a protocol downgrade doesn't happen
|
||||
if req.URL.Scheme == "https" && respLoc.Scheme != "https" {
|
||||
return result, fmt.Errorf("redirect would cause protocol downgrade")
|
||||
}
|
||||
|
||||
// Update the request
|
||||
r.URL = respLoc
|
||||
|
||||
// Reset the request body if any
|
||||
if err := r.ResetJSONBody(); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Retry the request
|
||||
redirectCount++
|
||||
goto START
|
||||
}
|
||||
|
||||
if err := result.Error(); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
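A minimal sketch of configuring this client by hand rather than through environment variables: start from `DefaultConfig`, point it at a server, attach a custom CA bundle via `ConfigureTLS`, and enable a couple of retries. The address and CA path are placeholders.

// Minimal sketch: manual client configuration with a custom CA bundle.
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	config := api.DefaultConfig()
	config.Address = "https://vault.example.com:8200" // placeholder address

	// Verify the server certificate against a custom CA bundle (placeholder path).
	if err := config.ConfigureTLS(&api.TLSConfig{
		CACert: "/etc/ssl/vault-ca.pem",
	}); err != nil {
		log.Fatal(err)
	}

	client, err := api.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	client.SetMaxRetries(2) // retry 5xx responses a couple of times
}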
25
vendor/github.com/hashicorp/vault/api/help.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Help reads the help information for the given path.
|
||||
func (c *Client) Help(path string) (*Help, error) {
|
||||
r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path))
|
||||
r.Params.Add("help", "1")
|
||||
resp, err := c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result Help
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
type Help struct {
|
||||
Help string `json:"help"`
|
||||
SeeAlso []string `json:"see_also"`
|
||||
}
|
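A minimal sketch of the `Help` helper above; the `sys/mounts` path is just one example of a path the client's token can query.

// Minimal sketch: ask any path for its ?help=1 text via Client.Help.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(nil)
	if err != nil {
		log.Fatal(err)
	}

	help, err := client.Help("sys/mounts")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(help.Help)
	fmt.Println(help.SeeAlso)
}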
185
vendor/github.com/hashicorp/vault/api/logical.go
generated
vendored
Normal file
|
@ -0,0 +1,185 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/vault/helper/jsonutil"
|
||||
)
|
||||
|
||||
const (
|
||||
wrappedResponseLocation = "cubbyhole/response"
|
||||
)
|
||||
|
||||
var (
|
||||
// The default TTL that will be used with `sys/wrapping/wrap`, can be
|
||||
// changed
|
||||
DefaultWrappingTTL = "5m"
|
||||
|
||||
// The default function used if no other function is set, which honors the
|
||||
// env var and wraps `sys/wrapping/wrap`
|
||||
DefaultWrappingLookupFunc = func(operation, path string) string {
|
||||
if os.Getenv(EnvVaultWrapTTL) != "" {
|
||||
return os.Getenv(EnvVaultWrapTTL)
|
||||
}
|
||||
|
||||
if (operation == "PUT" || operation == "POST") && path == "sys/wrapping/wrap" {
|
||||
return DefaultWrappingTTL
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
)
|
||||
|
||||
// Logical is used to perform logical backend operations on Vault.
|
||||
type Logical struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Logical is used to return the client for logical-backend API calls.
|
||||
func (c *Client) Logical() *Logical {
|
||||
return &Logical{c: c}
|
||||
}
|
||||
|
||||
func (c *Logical) Read(path string) (*Secret, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/"+path)
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if resp != nil && resp.StatusCode == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *Logical) List(path string) (*Secret, error) {
|
||||
r := c.c.NewRequest("LIST", "/v1/"+path)
|
||||
// Set this for broader compatibility, but we use LIST above to be able to
|
||||
// handle the wrapping lookup function
|
||||
r.Method = "GET"
|
||||
r.Params.Set("list", "true")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if resp != nil && resp.StatusCode == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/"+path)
|
||||
if err := r.SetJSONBody(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode == 200 {
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *Logical) Delete(path string) (*Secret, error) {
|
||||
r := c.c.NewRequest("DELETE", "/v1/"+path)
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode == 200 {
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
|
||||
var data map[string]interface{}
|
||||
if wrappingToken != "" {
|
||||
if c.c.Token() == "" {
|
||||
c.c.SetToken(wrappingToken)
|
||||
} else if wrappingToken != c.c.Token() {
|
||||
data = map[string]interface{}{
|
||||
"token": wrappingToken,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/wrapping/unwrap")
|
||||
if err := r.SetJSONBody(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode != 404 {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK: // New method is supported
|
||||
return ParseSecret(resp.Body)
|
||||
case http.StatusNotFound: // Fall back to old method
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if wrappingToken != "" {
|
||||
origToken := c.c.Token()
|
||||
defer c.c.SetToken(origToken)
|
||||
c.c.SetToken(wrappingToken)
|
||||
}
|
||||
|
||||
secret, err := c.Read(wrappedResponseLocation)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err)
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation)
|
||||
}
|
||||
if secret.Data == nil {
|
||||
return nil, fmt.Errorf("\"data\" not found in wrapping response")
|
||||
}
|
||||
if _, ok := secret.Data["response"]; !ok {
|
||||
return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map")
|
||||
}
|
||||
|
||||
wrappedSecret := new(Secret)
|
||||
buf := bytes.NewBufferString(secret.Data["response"].(string))
|
||||
if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling wrapped secret: %s", err)
|
||||
}
|
||||
|
||||
return wrappedSecret, nil
|
||||
}
|
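A minimal write/read/delete round trip against the logical helpers above; the `secret/demo` path and the payload are placeholders.

// Minimal sketch: write a generic secret, read it back, then delete it.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	logical := client.Logical()

	if _, err := logical.Write("secret/demo", map[string]interface{}{
		"user":     "deploy",
		"password": "hunter2",
	}); err != nil {
		log.Fatal(err)
	}

	secret, err := logical.Read("secret/demo")
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data)
	}

	if _, err := logical.Delete("secret/demo"); err != nil {
		log.Fatal(err)
	}
}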
72
vendor/github.com/hashicorp/vault/api/request.go
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// Request is a raw request configuration structure used to initiate
|
||||
// API requests to the Vault server.
|
||||
type Request struct {
|
||||
Method string
|
||||
URL *url.URL
|
||||
Params url.Values
|
||||
ClientToken string
|
||||
WrapTTL string
|
||||
Obj interface{}
|
||||
Body io.Reader
|
||||
BodySize int64
|
||||
}
|
||||
|
||||
// SetJSONBody is used to set a request body that is a JSON-encoded value.
|
||||
func (r *Request) SetJSONBody(val interface{}) error {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
enc := json.NewEncoder(buf)
|
||||
if err := enc.Encode(val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Obj = val
|
||||
r.Body = buf
|
||||
r.BodySize = int64(buf.Len())
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetJSONBody is used to reset the body for a redirect
|
||||
func (r *Request) ResetJSONBody() error {
|
||||
if r.Body == nil {
|
||||
return nil
|
||||
}
|
||||
return r.SetJSONBody(r.Obj)
|
||||
}
|
||||
|
||||
// ToHTTP turns this request into a valid *http.Request for use with the
|
||||
// net/http package.
|
||||
func (r *Request) ToHTTP() (*http.Request, error) {
|
||||
// Encode the query parameters
|
||||
r.URL.RawQuery = r.Params.Encode()
|
||||
|
||||
// Create the HTTP request
|
||||
req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.URL.User = r.URL.User
|
||||
req.URL.Scheme = r.URL.Scheme
|
||||
req.URL.Host = r.URL.Host
|
||||
req.Host = r.URL.Host
|
||||
|
||||
if len(r.ClientToken) != 0 {
|
||||
req.Header.Set("X-Vault-Token", r.ClientToken)
|
||||
}
|
||||
|
||||
if len(r.WrapTTL) != 0 {
|
||||
req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL)
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
72
vendor/github.com/hashicorp/vault/api/response.go
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/hashicorp/vault/helper/jsonutil"
|
||||
)
|
||||
|
||||
// Response is a raw response that wraps an HTTP response.
|
||||
type Response struct {
|
||||
*http.Response
|
||||
}
|
||||
|
||||
// DecodeJSON will decode the response body to a JSON structure. This
|
||||
// will consume the response body, but will not close it. Close must
|
||||
// still be called.
|
||||
func (r *Response) DecodeJSON(out interface{}) error {
|
||||
return jsonutil.DecodeJSONFromReader(r.Body, out)
|
||||
}
|
||||
|
||||
// Error returns an error response if there is one. If there is an error,
|
||||
// this will fully consume the response body, but will not close it. The
|
||||
// body must still be closed manually.
|
||||
func (r *Response) Error() error {
|
||||
// 200 to 399 are okay status codes
|
||||
if r.StatusCode >= 200 && r.StatusCode < 400 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We have an error. Let's copy the body into our own buffer first,
|
||||
// so that if we can't decode JSON, we can at least copy it raw.
|
||||
var bodyBuf bytes.Buffer
|
||||
if _, err := io.Copy(&bodyBuf, r.Body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode the error response if we can. Note that we wrap the bodyBuf
|
||||
// in a bytes.Reader here so that the JSON decoder doesn't move the
|
||||
// read pointer for the original buffer.
|
||||
var resp ErrorResponse
|
||||
if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil {
|
||||
// Ignore the decoding error and just drop the raw response
|
||||
return fmt.Errorf(
|
||||
"Error making API request.\n\n"+
|
||||
"URL: %s %s\n"+
|
||||
"Code: %d. Raw Message:\n\n%s",
|
||||
r.Request.Method, r.Request.URL.String(),
|
||||
r.StatusCode, bodyBuf.String())
|
||||
}
|
||||
|
||||
var errBody bytes.Buffer
|
||||
errBody.WriteString(fmt.Sprintf(
|
||||
"Error making API request.\n\n"+
|
||||
"URL: %s %s\n"+
|
||||
"Code: %d. Errors:\n\n",
|
||||
r.Request.Method, r.Request.URL.String(),
|
||||
r.StatusCode))
|
||||
for _, err := range resp.Errors {
|
||||
errBody.WriteString(fmt.Sprintf("* %s", err))
|
||||
}
|
||||
|
||||
return fmt.Errorf(errBody.String())
|
||||
}
|
||||
|
||||
// ErrorResponse is the raw structure of errors when they're returned by the
|
||||
// HTTP API.
|
||||
type ErrorResponse struct {
|
||||
Errors []string
|
||||
}
|
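A small sketch of how Response.Error and Response.DecodeJSON from the file above are typically used after a raw request; this is illustrative only, not part of the vendored diff. The path is a placeholder and the client is assumed to be configured already. Depending on the client implementation, RawRequest may already surface API errors, so the extra Error() call is defensive.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

// readRaw issues a raw request and decodes the JSON response body.
func readRaw(client *api.Client) (map[string]interface{}, error) {
	r := client.NewRequest("GET", "/v1/sys/health")
	resp, err := client.RawRequest(r)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// Consumes the body and returns a formatted error for non-2xx/3xx codes.
	if err := resp.Error(); err != nil {
		return nil, fmt.Errorf("request failed: %v", err)
	}

	var out map[string]interface{}
	if err := resp.DecodeJSON(&out); err != nil {
		return nil, err
	}
	return out, nil
}
```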
vendor/github.com/hashicorp/vault/api/secret.go (generated, vendored, new file)
@@ -0,0 +1,68 @@
package api
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/helper/jsonutil"
|
||||
)
|
||||
|
||||
// Secret is the structure returned for every secret within Vault.
|
||||
type Secret struct {
|
||||
// The request ID that generated this response
|
||||
RequestID string `json:"request_id"`
|
||||
|
||||
LeaseID string `json:"lease_id"`
|
||||
LeaseDuration int `json:"lease_duration"`
|
||||
Renewable bool `json:"renewable"`
|
||||
|
||||
// Data is the actual contents of the secret. The format of the data
|
||||
// is arbitrary and up to the secret backend.
|
||||
Data map[string]interface{} `json:"data"`
|
||||
|
||||
// Warnings contains any warnings related to the operation. These
|
||||
// are not issues that caused the command to fail, but that the
|
||||
// client should be aware of.
|
||||
Warnings []string `json:"warnings"`
|
||||
|
||||
// Auth, if non-nil, means that there was authentication information
|
||||
// attached to this response.
|
||||
Auth *SecretAuth `json:"auth,omitempty"`
|
||||
|
||||
// WrapInfo, if non-nil, means that the initial response was wrapped in the
|
||||
// cubbyhole of the given token (which has a TTL of the given number of
|
||||
// seconds)
|
||||
WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"`
|
||||
}
|
||||
|
||||
// SecretWrapInfo contains wrapping information if we have it. If what is
|
||||
// contained is an authentication token, the accessor for the token will be
|
||||
// available in WrappedAccessor.
|
||||
type SecretWrapInfo struct {
|
||||
Token string `json:"token"`
|
||||
TTL int `json:"ttl"`
|
||||
CreationTime time.Time `json:"creation_time"`
|
||||
WrappedAccessor string `json:"wrapped_accessor"`
|
||||
}
|
||||
|
||||
// SecretAuth is the structure containing auth information if we have it.
|
||||
type SecretAuth struct {
|
||||
ClientToken string `json:"client_token"`
|
||||
Accessor string `json:"accessor"`
|
||||
Policies []string `json:"policies"`
|
||||
Metadata map[string]string `json:"metadata"`
|
||||
|
||||
LeaseDuration int `json:"lease_duration"`
|
||||
Renewable bool `json:"renewable"`
|
||||
}
|
||||
|
||||
// ParseSecret is used to parse a secret value from JSON from an io.Reader.
|
||||
func ParseSecret(r io.Reader) (*Secret, error) {
|
||||
// Decode the JSON directly into the Secret structure
|
||||
var secret Secret
|
||||
if err := jsonutil.DecodeJSONFromReader(r, &secret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &secret, nil
|
||||
}
|
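A minimal sketch of ParseSecret from secret.go above, fed with a hand-written JSON document; in real use the reader is usually an HTTP response body. Not part of the vendored files; the field values are placeholders.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/vault/api"
)

func main() {
	// A hand-written stand-in for a Vault response body.
	body := `{"lease_id":"abc","lease_duration":300,"data":{"password":"s3cr3t"}}`

	secret, err := api.ParseSecret(strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	fmt.Println(secret.LeaseID, secret.LeaseDuration, secret.Data["password"])
}
```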
vendor/github.com/hashicorp/vault/api/ssh.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
package api
|
||||
|
||||
import "fmt"
|
||||
|
||||
// SSH is used to return a client to invoke operations on SSH backend.
|
||||
type SSH struct {
|
||||
c *Client
|
||||
MountPoint string
|
||||
}
|
||||
|
||||
// SSH returns the client for logical-backend API calls.
|
||||
func (c *Client) SSH() *SSH {
|
||||
return c.SSHWithMountPoint(SSHHelperDefaultMountPoint)
|
||||
}
|
||||
|
||||
// SSHWithMountPoint returns the client with specific SSH mount point.
|
||||
func (c *Client) SSHWithMountPoint(mountPoint string) *SSH {
|
||||
return &SSH{
|
||||
c: c,
|
||||
MountPoint: mountPoint,
|
||||
}
|
||||
}
|
||||
|
||||
// Credential invokes the SSH backend API to create a credential to establish an SSH session.
|
||||
func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) {
|
||||
r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role))
|
||||
if err := r.SetJSONBody(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
vendor/github.com/hashicorp/vault/api/ssh_agent.go (generated, vendored, new file)
@@ -0,0 +1,257 @@
package api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-rootcerts"
|
||||
"github.com/hashicorp/hcl"
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
const (
|
||||
// SSHHelperDefaultMountPoint is the default path at which SSH backend will be
|
||||
// mounted in the Vault server.
|
||||
SSHHelperDefaultMountPoint = "ssh"
|
||||
|
||||
// VerifyEchoRequest is the echo request message sent as OTP by the helper.
|
||||
VerifyEchoRequest = "verify-echo-request"
|
||||
|
||||
// VerifyEchoResponse is the echo response message sent as a response to OTP
|
||||
// matching echo request.
|
||||
VerifyEchoResponse = "verify-echo-response"
|
||||
)
|
||||
|
||||
// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server
|
||||
// in order to verify the OTP entered by the user. It contains the path at which
|
||||
// SSH backend is mounted at the server.
|
||||
type SSHHelper struct {
|
||||
c *Client
|
||||
MountPoint string
|
||||
}
|
||||
|
||||
// SSHVerifyResponse is a structure representing the fields in Vault server's
|
||||
// response.
|
||||
type SSHVerifyResponse struct {
|
||||
// Usually empty. If the request OTP is echo request message, this will
|
||||
// be set to the corresponding echo response message.
|
||||
Message string `json:"message" structs:"message" mapstructure:"message"`
|
||||
|
||||
// Username associated with the OTP
|
||||
Username string `json:"username" structs:"username" mapstructure:"username"`
|
||||
|
||||
// IP associated with the OTP
|
||||
IP string `json:"ip" structs:"ip" mapstructure:"ip"`
|
||||
|
||||
// Name of the role against which the OTP was issued
|
||||
RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
|
||||
}
|
||||
|
||||
// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file.
|
||||
type SSHHelperConfig struct {
|
||||
VaultAddr string `hcl:"vault_addr"`
|
||||
SSHMountPoint string `hcl:"ssh_mount_point"`
|
||||
CACert string `hcl:"ca_cert"`
|
||||
CAPath string `hcl:"ca_path"`
|
||||
AllowedCidrList string `hcl:"allowed_cidr_list"`
|
||||
AllowedRoles string `hcl:"allowed_roles"`
|
||||
TLSSkipVerify bool `hcl:"tls_skip_verify"`
|
||||
TLSServerName string `hcl:"tls_server_name"`
|
||||
}
|
||||
|
||||
// SetTLSParameters sets the TLS parameters for this SSH agent.
|
||||
func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) {
|
||||
tlsConfig := &tls.Config{
|
||||
InsecureSkipVerify: c.TLSSkipVerify,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
RootCAs: certPool,
|
||||
ServerName: c.TLSServerName,
|
||||
}
|
||||
|
||||
transport := cleanhttp.DefaultTransport()
|
||||
transport.TLSClientConfig = tlsConfig
|
||||
clientConfig.HttpClient.Transport = transport
|
||||
}
|
||||
|
||||
// Returns true if any of the following conditions are true:
|
||||
// * CA cert is configured
|
||||
// * CA path is configured
|
||||
// * configured to skip certificate verification
|
||||
// * TLS server name is configured
|
||||
//
|
||||
func (c *SSHHelperConfig) shouldSetTLSParameters() bool {
|
||||
return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify
|
||||
}
|
||||
|
||||
// NewClient returns a new client for the configuration. This client will be used by the
|
||||
// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user.
|
||||
// If the configuration supplies Vault SSL certificates, then the client will
|
||||
// have TLS configured in its transport.
|
||||
func (c *SSHHelperConfig) NewClient() (*Client, error) {
|
||||
// Creating a default client configuration for communicating with vault server.
|
||||
clientConfig := DefaultConfig()
|
||||
|
||||
// Pointing the client to the actual address of vault server.
|
||||
clientConfig.Address = c.VaultAddr
|
||||
|
||||
// Check if certificates are provided via config file.
|
||||
if c.shouldSetTLSParameters() {
|
||||
rootConfig := &rootcerts.Config{
|
||||
CAFile: c.CACert,
|
||||
CAPath: c.CAPath,
|
||||
}
|
||||
certPool, err := rootcerts.LoadCACerts(rootConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Enable TLS on the HTTP client information
|
||||
c.SetTLSParameters(clientConfig, certPool)
|
||||
}
|
||||
|
||||
// Creating the client object for the given configuration
|
||||
client, err := NewClient(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding
|
||||
// in-memory structure.
|
||||
//
|
||||
// Vault address is a required parameter.
|
||||
// Mount point defaults to "ssh".
|
||||
func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) {
|
||||
contents, err := ioutil.ReadFile(path)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, multierror.Prefix(err, "ssh_helper:")
|
||||
}
|
||||
return ParseSSHHelperConfig(string(contents))
|
||||
}
|
||||
|
||||
// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper
|
||||
// configuration.
|
||||
func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
|
||||
root, err := hcl.Parse(string(contents))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err)
|
||||
}
|
||||
|
||||
list, ok := root.Node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object")
|
||||
}
|
||||
|
||||
valid := []string{
|
||||
"vault_addr",
|
||||
"ssh_mount_point",
|
||||
"ca_cert",
|
||||
"ca_path",
|
||||
"allowed_cidr_list",
|
||||
"allowed_roles",
|
||||
"tls_skip_verify",
|
||||
"tls_server_name",
|
||||
}
|
||||
if err := checkHCLKeys(list, valid); err != nil {
|
||||
return nil, multierror.Prefix(err, "ssh_helper:")
|
||||
}
|
||||
|
||||
var c SSHHelperConfig
|
||||
c.SSHMountPoint = SSHHelperDefaultMountPoint
|
||||
if err := hcl.DecodeObject(&c, list); err != nil {
|
||||
return nil, multierror.Prefix(err, "ssh_helper:")
|
||||
}
|
||||
|
||||
if c.VaultAddr == "" {
|
||||
return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'")
|
||||
}
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend
|
||||
// mounted at default path ("ssh").
|
||||
func (c *Client) SSHHelper() *SSHHelper {
|
||||
return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint)
|
||||
}
|
||||
|
||||
// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend
|
||||
// mounted at a specific mount point.
|
||||
func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper {
|
||||
return &SSHHelper{
|
||||
c: c,
|
||||
MountPoint: mountPoint,
|
||||
}
|
||||
}
|
||||
|
||||
// Verify verifies if the key provided by user is present in Vault server. The response
|
||||
// will contain the IP address and username associated with the OTP. In case the
|
||||
// OTP matches the echo request message, instead of searching an entry for the OTP,
|
||||
// an echo response message is returned. This feature is used by ssh-helper to verify if
|
||||
// it's configured correctly.
|
||||
func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) {
|
||||
data := map[string]interface{}{
|
||||
"otp": otp,
|
||||
}
|
||||
verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint)
|
||||
r := c.c.NewRequest("PUT", verifyPath)
|
||||
if err := r.SetJSONBody(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
secret, err := ParseSecret(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if secret.Data == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var verifyResp SSHVerifyResponse
|
||||
err = mapstructure.Decode(secret.Data, &verifyResp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &verifyResp, nil
|
||||
}
|
||||
|
||||
func checkHCLKeys(node ast.Node, valid []string) error {
|
||||
var list *ast.ObjectList
|
||||
switch n := node.(type) {
|
||||
case *ast.ObjectList:
|
||||
list = n
|
||||
case *ast.ObjectType:
|
||||
list = n.List
|
||||
default:
|
||||
return fmt.Errorf("cannot check HCL keys of type %T", n)
|
||||
}
|
||||
|
||||
validMap := make(map[string]struct{}, len(valid))
|
||||
for _, v := range valid {
|
||||
validMap[v] = struct{}{}
|
||||
}
|
||||
|
||||
var result error
|
||||
for _, item := range list.Items {
|
||||
key := item.Keys[0].Token.Value().(string)
|
||||
if _, ok := validMap[key]; !ok {
|
||||
result = multierror.Append(result, fmt.Errorf(
|
||||
"invalid key '%s' on line %d", key, item.Assign.Line))
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
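For context, a minimal sketch of ParseSSHHelperConfig from ssh_agent.go above, parsing an inline HCL document of the kind vault-ssh-helper reads from disk. The address is a placeholder; only vault_addr is mandatory, and the mount point falls back to "ssh" when ssh_mount_point is omitted. Not part of the vendored files.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	cfg, err := api.ParseSSHHelperConfig(`
vault_addr = "https://vault.example.com:8200"
tls_skip_verify = false
`)
	if err != nil {
		panic(err)
	}
	// SSHMountPoint defaults to "ssh" when not set in the config.
	fmt.Println(cfg.VaultAddr, cfg.SSHMountPoint)
}
```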
vendor/github.com/hashicorp/vault/api/sys.go (generated, vendored, new file)
@@ -0,0 +1,11 @@
package api
|
||||
|
||||
// Sys is used to perform system-related operations on Vault.
|
||||
type Sys struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Sys is used to return the client for sys-related API calls.
|
||||
func (c *Client) Sys() *Sys {
|
||||
return &Sys{c: c}
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_audit.go (generated, vendored, new file)
@@ -0,0 +1,128 @@
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
func (c *Sys) AuditHash(path string, input string) (string, error) {
|
||||
body := map[string]interface{}{
|
||||
"input": input,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path))
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
type d struct {
|
||||
Hash string `json:"hash"`
|
||||
}
|
||||
|
||||
var result d
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return result.Hash, err
|
||||
}
|
||||
|
||||
func (c *Sys) ListAudit() (map[string]*Audit, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/audit")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result map[string]interface{}
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mounts := map[string]*Audit{}
|
||||
for k, v := range result {
|
||||
switch v.(type) {
|
||||
case map[string]interface{}:
|
||||
default:
|
||||
continue
|
||||
}
|
||||
var res Audit
|
||||
err = mapstructure.Decode(v, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Not a mount, some other api.Secret data
|
||||
if res.Type == "" {
|
||||
continue
|
||||
}
|
||||
mounts[k] = &res
|
||||
}
|
||||
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
// DEPRECATED: Use EnableAuditWithOptions instead
|
||||
func (c *Sys) EnableAudit(
|
||||
path string, auditType string, desc string, opts map[string]string) error {
|
||||
return c.EnableAuditWithOptions(path, &EnableAuditOptions{
|
||||
Type: auditType,
|
||||
Description: desc,
|
||||
Options: opts,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error {
|
||||
body := structs.Map(options)
|
||||
|
||||
r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path))
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Sys) DisableAudit(path string) error {
|
||||
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path))
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Structures for the requests/responses are all down here. They aren't
|
||||
// individually documented because they map almost directly to the raw HTTP API
|
||||
// documentation. Please refer to that documentation for more details.
|
||||
|
||||
type EnableAuditOptions struct {
|
||||
Type string `json:"type" structs:"type"`
|
||||
Description string `json:"description" structs:"description"`
|
||||
Options map[string]string `json:"options" structs:"options"`
|
||||
Local bool `json:"local" structs:"local"`
|
||||
}
|
||||
|
||||
type Audit struct {
|
||||
Path string
|
||||
Type string
|
||||
Description string
|
||||
Options map[string]string
|
||||
Local bool
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_auth.go (generated, vendored, new file)
@@ -0,0 +1,100 @@
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
func (c *Sys) ListAuth() (map[string]*AuthMount, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/auth")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result map[string]interface{}
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mounts := map[string]*AuthMount{}
|
||||
for k, v := range result {
|
||||
switch v.(type) {
|
||||
case map[string]interface{}:
|
||||
default:
|
||||
continue
|
||||
}
|
||||
var res AuthMount
|
||||
err = mapstructure.Decode(v, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Not a mount, some other api.Secret data
|
||||
if res.Type == "" {
|
||||
continue
|
||||
}
|
||||
mounts[k] = &res
|
||||
}
|
||||
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
// DEPRECATED: Use EnableAuthWithOptions instead
|
||||
func (c *Sys) EnableAuth(path, authType, desc string) error {
|
||||
return c.EnableAuthWithOptions(path, &EnableAuthOptions{
|
||||
Type: authType,
|
||||
Description: desc,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error {
|
||||
body := structs.Map(options)
|
||||
|
||||
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path))
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Sys) DisableAuth(path string) error {
|
||||
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path))
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Structures for the requests/responses are all down here. They aren't
|
||||
// individually documented because they map almost directly to the raw HTTP API
|
||||
// documentation. Please refer to that documentation for more details.
|
||||
|
||||
type EnableAuthOptions struct {
|
||||
Type string `json:"type" structs:"type"`
|
||||
Description string `json:"description" structs:"description"`
|
||||
Local bool `json:"local" structs:"local"`
|
||||
}
|
||||
|
||||
type AuthMount struct {
|
||||
Type string `json:"type" structs:"type" mapstructure:"type"`
|
||||
Description string `json:"description" structs:"description" mapstructure:"description"`
|
||||
Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"`
|
||||
Local bool `json:"local" structs:"local" mapstructure:"local"`
|
||||
}
|
||||
|
||||
type AuthConfigOutput struct {
|
||||
DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
|
||||
MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_capabilities.go (generated, vendored, new file)
@@ -0,0 +1,43 @@
package api
|
||||
|
||||
import "fmt"
|
||||
|
||||
func (c *Sys) CapabilitiesSelf(path string) ([]string, error) {
|
||||
return c.Capabilities(c.c.Token(), path)
|
||||
}
|
||||
|
||||
func (c *Sys) Capabilities(token, path string) ([]string, error) {
|
||||
body := map[string]string{
|
||||
"token": token,
|
||||
"path": path,
|
||||
}
|
||||
|
||||
reqPath := "/v1/sys/capabilities"
|
||||
if token == c.c.Token() {
|
||||
reqPath = fmt.Sprintf("%s-self", reqPath)
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("POST", reqPath)
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result map[string]interface{}
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var capabilities []string
|
||||
capabilitiesRaw := result["capabilities"].([]interface{})
|
||||
for _, capability := range capabilitiesRaw {
|
||||
capabilities = append(capabilities, capability.(string))
|
||||
}
|
||||
return capabilities, nil
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_generate_root.go (generated, vendored, new file)
@@ -0,0 +1,77 @@
package api
|
||||
|
||||
func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/generate-root/attempt")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result GenerateRootStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) {
|
||||
body := map[string]interface{}{
|
||||
"otp": otp,
|
||||
"pgp_key": pgpKey,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/generate-root/attempt")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result GenerateRootStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) GenerateRootCancel() error {
|
||||
r := c.c.NewRequest("DELETE", "/v1/sys/generate-root/attempt")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) {
|
||||
body := map[string]interface{}{
|
||||
"key": shard,
|
||||
"nonce": nonce,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/generate-root/update")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result GenerateRootStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
type GenerateRootStatusResponse struct {
|
||||
Nonce string
|
||||
Started bool
|
||||
Progress int
|
||||
Required int
|
||||
Complete bool
|
||||
EncodedRootToken string `json:"encoded_root_token"`
|
||||
PGPFingerprint string `json:"pgp_fingerprint"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_init.go (generated, vendored, new file)
@@ -0,0 +1,54 @@
package api
|
||||
|
||||
func (c *Sys) InitStatus() (bool, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/init")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result InitStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return result.Initialized, err
|
||||
}
|
||||
|
||||
func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/init")
|
||||
if err := r.SetJSONBody(opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result InitResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
type InitRequest struct {
|
||||
SecretShares int `json:"secret_shares"`
|
||||
SecretThreshold int `json:"secret_threshold"`
|
||||
StoredShares int `json:"stored_shares"`
|
||||
PGPKeys []string `json:"pgp_keys"`
|
||||
RecoveryShares int `json:"recovery_shares"`
|
||||
RecoveryThreshold int `json:"recovery_threshold"`
|
||||
RecoveryPGPKeys []string `json:"recovery_pgp_keys"`
|
||||
RootTokenPGPKey string `json:"root_token_pgp_key"`
|
||||
}
|
||||
|
||||
type InitStatusResponse struct {
|
||||
Initialized bool
|
||||
}
|
||||
|
||||
type InitResponse struct {
|
||||
Keys []string `json:"keys"`
|
||||
KeysB64 []string `json:"keys_base64"`
|
||||
RecoveryKeys []string `json:"recovery_keys"`
|
||||
RecoveryKeysB64 []string `json:"recovery_keys_base64"`
|
||||
RootToken string `json:"root_token"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_leader.go (generated, vendored, new file)
@@ -0,0 +1,20 @@
package api
|
||||
|
||||
func (c *Sys) Leader() (*LeaderResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/leader")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result LeaderResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
type LeaderResponse struct {
|
||||
HAEnabled bool `json:"ha_enabled"`
|
||||
IsSelf bool `json:"is_self"`
|
||||
LeaderAddress string `json:"leader_address"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_lease.go (generated, vendored, new file)
@@ -0,0 +1,48 @@
package api
|
||||
|
||||
func (c *Sys) Renew(id string, increment int) (*Secret, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/renew")
|
||||
|
||||
body := map[string]interface{}{
|
||||
"increment": increment,
|
||||
"lease_id": id,
|
||||
}
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ParseSecret(resp.Body)
|
||||
}
|
||||
|
||||
func (c *Sys) Revoke(id string) error {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id)
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) RevokePrefix(id string) error {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id)
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) RevokeForce(id string) error {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id)
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_mounts.go (generated, vendored, new file)
@@ -0,0 +1,146 @@
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
func (c *Sys) ListMounts() (map[string]*MountOutput, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/mounts")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result map[string]interface{}
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mounts := map[string]*MountOutput{}
|
||||
for k, v := range result {
|
||||
switch v.(type) {
|
||||
case map[string]interface{}:
|
||||
default:
|
||||
continue
|
||||
}
|
||||
var res MountOutput
|
||||
err = mapstructure.Decode(v, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Not a mount, some other api.Secret data
|
||||
if res.Type == "" {
|
||||
continue
|
||||
}
|
||||
mounts[k] = &res
|
||||
}
|
||||
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
func (c *Sys) Mount(path string, mountInfo *MountInput) error {
|
||||
body := structs.Map(mountInfo)
|
||||
|
||||
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path))
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Sys) Unmount(path string) error {
|
||||
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path))
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) Remount(from, to string) error {
|
||||
body := map[string]interface{}{
|
||||
"from": from,
|
||||
"to": to,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("POST", "/v1/sys/remount")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) TuneMount(path string, config MountConfigInput) error {
|
||||
body := structs.Map(config)
|
||||
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) {
|
||||
r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result MountConfigOutput
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, err
|
||||
}
|
||||
|
||||
type MountInput struct {
|
||||
Type string `json:"type" structs:"type"`
|
||||
Description string `json:"description" structs:"description"`
|
||||
Config MountConfigInput `json:"config" structs:"config"`
|
||||
Local bool `json:"local" structs:"local"`
|
||||
}
|
||||
|
||||
type MountConfigInput struct {
|
||||
DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
|
||||
MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
|
||||
ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
|
||||
}
|
||||
|
||||
type MountOutput struct {
|
||||
Type string `json:"type" structs:"type"`
|
||||
Description string `json:"description" structs:"description"`
|
||||
Config MountConfigOutput `json:"config" structs:"config"`
|
||||
Local bool `json:"local" structs:"local"`
|
||||
}
|
||||
|
||||
type MountConfigOutput struct {
|
||||
DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
|
||||
MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
|
||||
ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
|
||||
}
|
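A brief usage sketch of the mount helpers in sys_mounts.go above, mounting a backend and tuning its lease TTLs. Not part of the vendored files; the mount path, backend type, and TTL values are examples, and the address/token are assumed to come from the usual environment variables.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// mountTransit mounts a transit backend at "transit" and tunes its TTLs.
func mountTransit(client *api.Client) error {
	if err := client.Sys().Mount("transit", &api.MountInput{
		Type:        "transit",
		Description: "encryption as a service",
	}); err != nil {
		return err
	}
	return client.Sys().TuneMount("transit", api.MountConfigInput{
		DefaultLeaseTTL: "1h",
		MaxLeaseTTL:     "24h",
	})
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	if err := mountTransit(client); err != nil {
		log.Fatal(err)
	}
}
```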
vendor/github.com/hashicorp/vault/api/sys_policy.go (generated, vendored, new file)
@@ -0,0 +1,95 @@
package api
|
||||
|
||||
import "fmt"
|
||||
|
||||
func (c *Sys) ListPolicies() ([]string, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/policy")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result map[string]interface{}
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if _, ok = result["policies"]; !ok {
|
||||
return nil, fmt.Errorf("policies not found in response")
|
||||
}
|
||||
|
||||
listRaw := result["policies"].([]interface{})
|
||||
var policies []string
|
||||
|
||||
for _, val := range listRaw {
|
||||
policies = append(policies, val.(string))
|
||||
}
|
||||
|
||||
return policies, err
|
||||
}
|
||||
|
||||
func (c *Sys) GetPolicy(name string) (string, error) {
|
||||
r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name))
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode == 404 {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
err = resp.DecodeJSON(&result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if _, ok = result["rules"]; !ok {
|
||||
return "", fmt.Errorf("rules not found in response")
|
||||
}
|
||||
|
||||
return result["rules"].(string), nil
|
||||
}
|
||||
|
||||
func (c *Sys) PutPolicy(name, rules string) error {
|
||||
body := map[string]string{
|
||||
"rules": rules,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policy/%s", name))
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Sys) DeletePolicy(name string) error {
|
||||
r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name))
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type getPoliciesResp struct {
|
||||
Rules string `json:"rules"`
|
||||
}
|
||||
|
||||
type listPoliciesResp struct {
|
||||
Policies []string `json:"policies"`
|
||||
}
|
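A small sketch of PutPolicy and GetPolicy from sys_policy.go above; not part of the vendored files. The policy name and rules are placeholders, and the client is assumed to be configured with an address and token.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

// putAndReadPolicy writes a small policy and reads it back.
func putAndReadPolicy(client *api.Client) error {
	rules := `path "secret/*" { capabilities = ["read", "list"] }`
	if err := client.Sys().PutPolicy("readonly-example", rules); err != nil {
		return err
	}
	stored, err := client.Sys().GetPolicy("readonly-example")
	if err != nil {
		return err
	}
	fmt.Println(stored)
	return nil
}
```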
vendor/github.com/hashicorp/vault/api/sys_rekey.go (generated, vendored, new file)
@@ -0,0 +1,202 @@
package api
|
||||
|
||||
func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/rekey/init")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/rekey/init")
|
||||
if err := r.SetJSONBody(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init")
|
||||
if err := r.SetJSONBody(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyCancel() error {
|
||||
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyRecoveryKeyCancel() error {
|
||||
r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
|
||||
body := map[string]interface{}{
|
||||
"key": shard,
|
||||
"nonce": nonce,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/rekey/update")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyUpdateResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
|
||||
body := map[string]interface{}{
|
||||
"key": shard,
|
||||
"nonce": nonce,
|
||||
}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyUpdateResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/rekey/backup")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyRetrieveResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result RekeyRetrieveResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyDeleteBackup() error {
|
||||
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) RekeyDeleteRecoveryBackup() error {
|
||||
r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type RekeyInitRequest struct {
|
||||
SecretShares int `json:"secret_shares"`
|
||||
SecretThreshold int `json:"secret_threshold"`
|
||||
PGPKeys []string `json:"pgp_keys"`
|
||||
Backup bool
|
||||
}
|
||||
|
||||
type RekeyStatusResponse struct {
|
||||
Nonce string
|
||||
Started bool
|
||||
T int
|
||||
N int
|
||||
Progress int
|
||||
Required int
|
||||
PGPFingerprints []string `json:"pgp_fingerprints"`
|
||||
Backup bool
|
||||
}
|
||||
|
||||
type RekeyUpdateResponse struct {
|
||||
Nonce string
|
||||
Complete bool
|
||||
Keys []string
|
||||
KeysB64 []string `json:"keys_base64"`
|
||||
PGPFingerprints []string `json:"pgp_fingerprints"`
|
||||
Backup bool
|
||||
}
|
||||
|
||||
type RekeyRetrieveResponse struct {
|
||||
Nonce string
|
||||
Keys map[string][]string
|
||||
KeysB64 map[string][]string `json:"keys_base64"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_rotate.go (generated, vendored, new file)
@@ -0,0 +1,30 @@
package api
|
||||
|
||||
import "time"
|
||||
|
||||
func (c *Sys) Rotate() error {
|
||||
r := c.c.NewRequest("POST", "/v1/sys/rotate")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) KeyStatus() (*KeyStatus, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/key-status")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
result := new(KeyStatus)
|
||||
err = resp.DecodeJSON(result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
type KeyStatus struct {
|
||||
Term int `json:"term"`
|
||||
InstallTime time.Time `json:"install_time"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_seal.go (generated, vendored, new file)
@@ -0,0 +1,60 @@
package api
|
||||
|
||||
func (c *Sys) SealStatus() (*SealStatusResponse, error) {
|
||||
r := c.c.NewRequest("GET", "/v1/sys/seal-status")
|
||||
return sealStatusRequest(c, r)
|
||||
}
|
||||
|
||||
func (c *Sys) Seal() error {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/seal")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) {
|
||||
body := map[string]interface{}{"reset": true}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/unseal")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sealStatusRequest(c, r)
|
||||
}
|
||||
|
||||
func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) {
|
||||
body := map[string]interface{}{"key": shard}
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/unseal")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sealStatusRequest(c, r)
|
||||
}
|
||||
|
||||
func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result SealStatusResponse
|
||||
err = resp.DecodeJSON(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
type SealStatusResponse struct {
|
||||
Sealed bool `json:"sealed"`
|
||||
T int `json:"t"`
|
||||
N int `json:"n"`
|
||||
Progress int `json:"progress"`
|
||||
Nonce string `json:"nonce"`
|
||||
Version string `json:"version"`
|
||||
ClusterName string `json:"cluster_name,omitempty"`
|
||||
ClusterID string `json:"cluster_id,omitempty"`
|
||||
}
|
vendor/github.com/hashicorp/vault/api/sys_stepdown.go (generated, vendored, new file)
@@ -0,0 +1,10 @@
package api
|
||||
|
||||
func (c *Sys) StepDown() error {
|
||||
r := c.c.NewRequest("PUT", "/v1/sys/step-down")
|
||||
resp, err := c.c.RawRequest(r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
vendor/github.com/hashicorp/vault/helper/compressutil/compress.go (generated, vendored, new file)
@@ -0,0 +1,159 @@
package compressutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/lzw"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
// A byte value used as a canary prefix for the compressed information
|
||||
// which is used to distinguish if a JSON input is compressed or not.
|
||||
// The value of this constant should not be a first character of any
|
||||
// valid JSON string.
|
||||
|
||||
// Byte value used as canary when using Gzip format
|
||||
CompressionCanaryGzip byte = 'G'
|
||||
|
||||
// Byte value used as canary when using Lzw format
|
||||
CompressionCanaryLzw byte = 'L'
|
||||
|
||||
CompressionTypeLzw = "lzw"
|
||||
|
||||
CompressionTypeGzip = "gzip"
|
||||
)
|
||||
|
||||
// CompressionConfig is used to select a compression type to be performed by
|
||||
// Compress and Decompress utilities.
|
||||
// Supported types are:
|
||||
// * CompressionTypeLzw
|
||||
// * CompressionTypeGzip
|
||||
//
|
||||
// When using CompressionTypeGzip, the compression levels can also be chosen:
|
||||
// * gzip.DefaultCompression
|
||||
// * gzip.BestSpeed
|
||||
// * gzip.BestCompression
|
||||
type CompressionConfig struct {
|
||||
// Type of the compression algorithm to be used
|
||||
Type string
|
||||
|
||||
// When using Gzip format, the compression level to employ
|
||||
GzipCompressionLevel int
|
||||
}
|
||||
|
||||
// Compress places the canary byte in a buffer and uses the same buffer to fill
|
||||
// in the compressed information of the given input. The configuration supports
|
||||
// two types of compression: LZW and Gzip. When using the Gzip compression format,
|
||||
// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will
|
||||
// be assumed.
|
||||
func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
var writer io.WriteCloser
|
||||
var err error
|
||||
|
||||
if config == nil {
|
||||
return nil, fmt.Errorf("config is nil")
|
||||
}
|
||||
|
||||
// Write the canary into the buffer and create writer to compress the
|
||||
// input data based on the configured type
|
||||
switch config.Type {
|
||||
case CompressionTypeLzw:
|
||||
buf.Write([]byte{CompressionCanaryLzw})
|
||||
|
||||
writer = lzw.NewWriter(&buf, lzw.LSB, 8)
|
||||
case CompressionTypeGzip:
|
||||
buf.Write([]byte{CompressionCanaryGzip})
|
||||
|
||||
switch {
|
||||
case config.GzipCompressionLevel == gzip.BestCompression,
|
||||
config.GzipCompressionLevel == gzip.BestSpeed,
|
||||
config.GzipCompressionLevel == gzip.DefaultCompression:
|
||||
// These are valid compression levels
|
||||
default:
|
||||
// If compression level is set to NoCompression or to
|
||||
// any invalid value, fall back to DefaultCompression
|
||||
config.GzipCompressionLevel = gzip.DefaultCompression
|
||||
}
|
||||
writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported compression type")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create a compression writer; err: %v", err)
|
||||
}
|
||||
|
||||
if writer == nil {
|
||||
return nil, fmt.Errorf("failed to create a compression writer")
|
||||
}
|
||||
|
||||
// Compress the input and place it in the same buffer containing the
|
||||
// canary byte.
|
||||
if _, err = writer.Write(data); err != nil {
|
||||
return nil, fmt.Errorf("failed to compress input data; err: %v", err)
|
||||
}
|
||||
|
||||
// Close the io.WriteCloser
|
||||
if err = writer.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the compressed bytes with canary byte at the start
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Decompress checks if the first byte in the input matches the canary byte.
|
||||
// If the first byte is a canary byte, then the input past the canary byte
|
||||
// will be decompressed using the method specified in the given configuration.
|
||||
// If the first byte isn't a canary byte, then the utility returns a boolean
|
||||
// value indicating that the input was not compressed.
|
||||
func Decompress(data []byte) ([]byte, bool, error) {
|
||||
var err error
|
||||
var reader io.ReadCloser
|
||||
if data == nil || len(data) == 0 {
|
||||
return nil, false, fmt.Errorf("'data' being decompressed is empty")
|
||||
}
|
||||
|
||||
switch {
|
||||
case data[0] == CompressionCanaryGzip:
|
||||
// If the first byte matches the canary byte, remove the canary
|
||||
// byte and try to decompress the data that is after the canary.
|
||||
if len(data) < 2 {
|
||||
return nil, false, fmt.Errorf("invalid 'data' after the canary")
|
||||
}
|
||||
data = data[1:]
|
||||
reader, err = gzip.NewReader(bytes.NewReader(data))
|
||||
case data[0] == CompressionCanaryLzw:
|
||||
// If the first byte matches the canary byte, remove the canary
|
||||
// byte and try to decompress the data that is after the canary.
|
||||
if len(data) < 2 {
|
||||
return nil, false, fmt.Errorf("invalid 'data' after the canary")
|
||||
}
|
||||
data = data[1:]
|
||||
reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8)
|
||||
default:
|
||||
// If the first byte doesn't match the canary byte, it means
|
||||
// that the content was not compressed at all. Indicate to the
|
||||
// caller that the input was not compressed.
|
||||
return nil, true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("failed to create a compression reader; err: %v", err)
|
||||
}
|
||||
if reader == nil {
|
||||
return nil, false, fmt.Errorf("failed to create a compression reader")
|
||||
}
|
||||
|
||||
// Close the io.ReadCloser
|
||||
defer reader.Close()
|
||||
|
||||
// Read all the compressed data into a buffer
|
||||
var buf bytes.Buffer
|
||||
if _, err = io.Copy(&buf, reader); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), false, nil
|
||||
}
|
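To illustrate the compressutil helpers above, a round trip through Compress and Decompress; not part of the vendored files. The payload is a placeholder, and the second return value of Decompress reports whether the input turned out to be uncompressed (no canary byte found).

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/compressutil"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	// GzipCompressionLevel is left at zero, so the utility falls back to
	// gzip.DefaultCompression.
	compressed, err := compressutil.Compress(payload, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeGzip,
	})
	if err != nil {
		panic(err)
	}

	restored, notCompressed, err := compressutil.Decompress(compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(notCompressed, string(restored)) // false {"hello":"world"}
}
```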
vendor/github.com/hashicorp/vault/helper/jsonutil/json.go (generated, vendored, new file)
@@ -0,0 +1,99 @@
package jsonutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/hashicorp/vault/helper/compressutil"
|
||||
)
|
||||
|
||||
// Encodes/Marshals the given object into JSON
|
||||
func EncodeJSON(in interface{}) ([]byte, error) {
|
||||
if in == nil {
|
||||
return nil, fmt.Errorf("input for encoding is nil")
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
enc := json.NewEncoder(&buf)
|
||||
if err := enc.Encode(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// EncodeJSONAndCompress encodes the given input into JSON and compresses the
|
||||
// encoded value (using Gzip format BestCompression level, by default). A
|
||||
// canary byte is placed at the beginning of the returned bytes for the logic
|
||||
// in decompression method to identify compressed input.
|
||||
func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
|
||||
if in == nil {
|
||||
return nil, fmt.Errorf("input for encoding is nil")
|
||||
}
|
||||
|
||||
// First JSON encode the given input
|
||||
encodedBytes, err := EncodeJSON(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
config = &compressutil.CompressionConfig{
|
||||
Type: compressutil.CompressionTypeGzip,
|
||||
GzipCompressionLevel: gzip.BestCompression,
|
||||
}
|
||||
}
|
||||
|
||||
return compressutil.Compress(encodedBytes, config)
|
||||
}
|
||||
|
||||
// DecodeJSON tries to decompress the given data. The call to decompress fails
|
||||
// if the content was not compressed in the first place, which is identified by
|
||||
// a canary byte before the compressed data. If the data is not compressed, it
|
||||
// is JSON decoded directly. Otherwise the decompressed data will be JSON
|
||||
// decoded.
|
||||
func DecodeJSON(data []byte, out interface{}) error {
|
||||
if data == nil || len(data) == 0 {
|
||||
return fmt.Errorf("'data' being decoded is nil")
|
||||
}
|
||||
if out == nil {
|
||||
return fmt.Errorf("output parameter 'out' is nil")
|
||||
}
|
||||
|
||||
// Decompress the data if it was compressed in the first place
|
||||
decompressedBytes, uncompressed, err := compressutil.Decompress(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decompress JSON: err: %v", err)
|
||||
}
|
||||
if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
|
||||
return fmt.Errorf("decompressed data being decoded is invalid")
|
||||
}
|
||||
|
||||
// If the input did not contain the compression canary, the compression
|
||||
// utility reports it as uncompressed. Decode the (possibly decompressed)
|
||||
// input.
|
||||
if !uncompressed {
|
||||
data = decompressedBytes
|
||||
}
|
||||
|
||||
return DecodeJSONFromReader(bytes.NewReader(data), out)
|
||||
}
|
||||
|
||||
// Decodes/Unmarshals JSON from the given io.Reader into the desired object
|
||||
func DecodeJSONFromReader(r io.Reader, out interface{}) error {
|
||||
if r == nil {
|
||||
return fmt.Errorf("'io.Reader' being decoded is nil")
|
||||
}
|
||||
if out == nil {
|
||||
return fmt.Errorf("output parameter 'out' is nil")
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(r)
|
||||
|
||||
// While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
|
||||
dec.UseNumber()
|
||||
|
||||
// Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
|
||||
return dec.Decode(out)
|
||||
}
|
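A round-trip sketch of the jsonutil helpers above; not part of the vendored files, and the encoded value is a placeholder. With a nil config, EncodeJSONAndCompress defaults to Gzip at BestCompression, and DecodeJSON detects the canary byte and decompresses transparently.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/jsonutil"
)

func main() {
	in := map[string]interface{}{"name": "example", "count": 3}

	// nil config: Gzip with BestCompression, canary byte prepended.
	blob, err := jsonutil.EncodeJSONAndCompress(in, nil)
	if err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := jsonutil.DecodeJSON(blob, &out); err != nil {
		panic(err)
	}
	// Numbers come back as json.Number because the decoder uses UseNumber.
	fmt.Println(out["name"], out["count"]) // example 3
}
```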
vendor/github.com/mitchellh/go-homedir/LICENSE (generated, vendored, new file)
@@ -0,0 +1,21 @@
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
14 vendor/github.com/mitchellh/go-homedir/README.md generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
# go-homedir
|
||||
|
||||
This is a Go library for detecting the user's home directory without
|
||||
the use of cgo, so the library can be used in cross-compilation environments.
|
||||
|
||||
Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
|
||||
for a user, and `homedir.Expand()` to expand the `~` in a path to the home
|
||||
directory.
|
||||
|
||||
**Why not just use `os/user`?** The built-in `os/user` package requires
|
||||
cgo on Darwin systems. This means that any Go code that uses that package
|
||||
cannot cross compile. But 99% of the time the use for `os/user` is just to
|
||||
retrieve the home directory, which we can do for the current user without
|
||||
cgo. This library does that, enabling cross-compilation.
|
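A small sketch of the two calls described in the README above (a usage illustration, not part of the vendored README; the expanded path is made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	// Dir returns the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand replaces a leading "~" with the home directory.
	path, err := homedir.Expand("~/.config/app.yml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", path)
}
```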
132 vendor/github.com/mitchellh/go-homedir/homedir.go generated vendored Normal file
|
@ -0,0 +1,132 @@
|
|||
package homedir
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// DisableCache will disable caching of the home directory. Caching is enabled
|
||||
// by default.
|
||||
var DisableCache bool
|
||||
|
||||
var homedirCache string
|
||||
var cacheLock sync.RWMutex
|
||||
|
||||
// Dir returns the home directory for the executing user.
|
||||
//
|
||||
// This uses an OS-specific method for discovering the home directory.
|
||||
// An error is returned if a home directory cannot be detected.
|
||||
func Dir() (string, error) {
|
||||
if !DisableCache {
|
||||
cacheLock.RLock()
|
||||
cached := homedirCache
|
||||
cacheLock.RUnlock()
|
||||
if cached != "" {
|
||||
return cached, nil
|
||||
}
|
||||
}
|
||||
|
||||
cacheLock.Lock()
|
||||
defer cacheLock.Unlock()
|
||||
|
||||
var result string
|
||||
var err error
|
||||
if runtime.GOOS == "windows" {
|
||||
result, err = dirWindows()
|
||||
} else {
|
||||
// Unix-like system, so just assume Unix
|
||||
result, err = dirUnix()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
homedirCache = result
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Expand expands the path to include the home directory if the path
|
||||
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
|
||||
// returned as-is.
|
||||
func Expand(path string) (string, error) {
|
||||
if len(path) == 0 {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if path[0] != '~' {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
|
||||
return "", errors.New("cannot expand user-specific home dir")
|
||||
}
|
||||
|
||||
dir, err := Dir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Join(dir, path[1:]), nil
|
||||
}
|
||||
|
||||
func dirUnix() (string, error) {
|
||||
// First prefer the HOME environmental variable
|
||||
if home := os.Getenv("HOME"); home != "" {
|
||||
return home, nil
|
||||
}
|
||||
|
||||
// If that fails, try getent
|
||||
var stdout bytes.Buffer
|
||||
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
|
||||
cmd.Stdout = &stdout
|
||||
if err := cmd.Run(); err != nil {
|
||||
// If "getent" is missing, ignore it
|
||||
if err == exec.ErrNotFound {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
|
||||
// username:password:uid:gid:gecos:home:shell
|
||||
passwdParts := strings.SplitN(passwd, ":", 7)
|
||||
if len(passwdParts) > 5 {
|
||||
return passwdParts[5], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If all else fails, try the shell
|
||||
stdout.Reset()
|
||||
cmd = exec.Command("sh", "-c", "cd && pwd")
|
||||
cmd.Stdout = &stdout
|
||||
if err := cmd.Run(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
result := strings.TrimSpace(stdout.String())
|
||||
if result == "" {
|
||||
return "", errors.New("blank output when reading home directory")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func dirWindows() (string, error) {
|
||||
drive := os.Getenv("HOMEDRIVE")
|
||||
path := os.Getenv("HOMEPATH")
|
||||
home := drive + path
|
||||
if drive == "" || path == "" {
|
||||
home = os.Getenv("USERPROFILE")
|
||||
}
|
||||
if home == "" {
|
||||
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
|
||||
}
|
||||
|
||||
return home, nil
|
||||
}
|
7 vendor/github.com/mitchellh/mapstructure/.travis.yml generated vendored Normal file
|
@ -0,0 +1,7 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
|
||||
script:
|
||||
- go test
|
21 vendor/github.com/mitchellh/mapstructure/LICENSE generated vendored Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
46 vendor/github.com/mitchellh/mapstructure/README.md generated vendored Normal file
|
@ -0,0 +1,46 @@
|
|||
# mapstructure
|
||||
|
||||
mapstructure is a Go library for decoding generic map values to structures
|
||||
and vice versa, while providing helpful error handling.
|
||||
|
||||
This library is most useful when decoding values from some data stream (JSON,
|
||||
Gob, etc.) where you don't _quite_ know the structure of the underlying data
|
||||
until you read a part of it. You can therefore read a `map[string]interface{}`
|
||||
and use this library to decode it into the proper underlying native Go
|
||||
structure.
|
||||
|
||||
## Installation
|
||||
|
||||
Standard `go get`:
|
||||
|
||||
```
|
||||
$ go get github.com/mitchellh/mapstructure
|
||||
```
|
||||
|
||||
## Usage & Example
|
||||
|
||||
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
|
||||
|
||||
The `Decode` function has examples associated with it there.
|
||||
|
||||
## But Why?!
|
||||
|
||||
Go offers fantastic standard libraries for decoding formats such as JSON.
|
||||
The standard method is to have a struct pre-created, and populate that struct
|
||||
from the bytes of the encoded format. This is great, but the problem is if
|
||||
you have configuration or an encoding that changes slightly depending on
|
||||
specific fields. For example, consider this JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "person",
|
||||
"name": "Mitchell"
|
||||
}
|
||||
```
|
||||
|
||||
Perhaps we can't populate a specific structure without first reading
|
||||
the "type" field from the JSON. We could always do two passes over the
|
||||
decoding of the JSON (reading the "type" first, and the rest later).
|
||||
However, it is much simpler to just decode this into a `map[string]interface{}`
|
||||
structure, read the "type" key, then use something like this library
|
||||
to decode it into the proper structure.
|
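To make the two-pass idea above concrete, here is a brief sketch using the `Decode` function defined in the vendored file further down in this diff (an illustrative addition, not upstream documentation; the struct is hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Type string
	Name string
}

func main() {
	// Generic data as it might arrive from decoding JSON into map[string]interface{}.
	raw := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	// Inspect the "type" key first, then decode into the matching struct.
	// Field matching is case-insensitive, so "name" maps onto Name.
	if raw["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v\n", p)
	}
}
```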
154 vendor/github.com/mitchellh/mapstructure/decode_hooks.go generated vendored Normal file
|
@ -0,0 +1,154 @@
|
|||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
|
||||
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []interface{}{f1, f2}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
for _, raw := range potential {
|
||||
pt := reflect.ValueOf(raw).Type()
|
||||
if vt.ConvertibleTo(pt) {
|
||||
return v.Convert(pt).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeHookExec executes the given decode hook. This should be used
|
||||
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Type, to reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
// Build our arguments that reflect expects
|
||||
argVals := make([]reflect.Value, 3)
|
||||
argVals[0] = reflect.ValueOf(from)
|
||||
argVals[1] = reflect.ValueOf(to)
|
||||
argVals[2] = reflect.ValueOf(data)
|
||||
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from, to, data)
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), data)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
|
||||
|
||||
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||
// automatically composes multiple DecodeHookFuncs.
|
||||
//
|
||||
// The composed funcs are called in order, with the result of the
|
||||
// previous transformation.
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
var err error
|
||||
for _, f1 := range fs {
|
||||
data, err = DecodeHookExec(f1, f, t, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify the from kind to be correct with the new data
|
||||
f = nil
|
||||
if val := reflect.ValueOf(data); val.IsValid() {
|
||||
f = val.Type()
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToSliceHookFunc returns a DecodeHookFunc that converts
|
||||
// string to []string by splitting on the given sep.
|
||||
func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f != reflect.String || t != reflect.Slice {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
raw := data.(string)
|
||||
if raw == "" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(raw, sep), nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Duration.
|
||||
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Duration(5)) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.ParseDuration(data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
func WeaklyTypedHook(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data interface{}) (interface{}, error) {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
switch t {
|
||||
case reflect.String:
|
||||
switch f {
|
||||
case reflect.Bool:
|
||||
if dataVal.Bool() {
|
||||
return "1", nil
|
||||
} else {
|
||||
return "0", nil
|
||||
}
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
|
||||
case reflect.Int:
|
||||
return strconv.FormatInt(dataVal.Int(), 10), nil
|
||||
case reflect.Slice:
|
||||
dataType := dataVal.Type()
|
||||
elemKind := dataType.Elem().Kind()
|
||||
if elemKind == reflect.Uint8 {
|
||||
return string(dataVal.Interface().([]uint8)), nil
|
||||
}
|
||||
case reflect.Uint:
|
||||
return strconv.FormatUint(dataVal.Uint(), 10), nil
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
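A hedged sketch of wiring the hooks above into a decoder via `DecoderConfig` and `NewDecoder` (both defined in mapstructure.go later in this diff); the struct, keys, and values are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	raw := map[string]interface{}{
		"timeout": "30s",
		"tags":    "a,b,c",
	}

	var out Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Compose the string->time.Duration and string->[]string hooks shown above.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}
```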
50 vendor/github.com/mitchellh/mapstructure/error.go generated vendored Normal file
|
@ -0,0 +1,50 @@
|
|||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Error implements the error interface and can represent multiple
|
||||
// errors that occur in the course of a single decode.
|
||||
type Error struct {
|
||||
Errors []string
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
points := make([]string, len(e.Errors))
|
||||
for i, err := range e.Errors {
|
||||
points[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
sort.Strings(points)
|
||||
return fmt.Sprintf(
|
||||
"%d error(s) decoding:\n\n%s",
|
||||
len(e.Errors), strings.Join(points, "\n"))
|
||||
}
|
||||
|
||||
// WrappedErrors implements the errwrap.Wrapper interface to make this
|
||||
// return value more useful with the errwrap and go-multierror libraries.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]error, len(e.Errors))
|
||||
for i, e := range e.Errors {
|
||||
result[i] = errors.New(e)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func appendErrors(errors []string, err error) []string {
|
||||
switch e := err.(type) {
|
||||
case *Error:
|
||||
return append(errors, e.Errors...)
|
||||
default:
|
||||
return append(errors, e.Error())
|
||||
}
|
||||
}
|
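A short sketch of inspecting the aggregated `*Error` via `WrappedErrors` from the file above (illustrative only; the target struct and map contents are made up so that both fields fail to decode):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Target struct {
	Count int
	Name  string
}

func main() {
	// Both values have the wrong type, so each field produces a decode error.
	raw := map[string]interface{}{
		"count": "not-a-number",
		"name":  42,
	}

	var t Target
	if err := mapstructure.Decode(raw, &t); err != nil {
		// The returned error is a *mapstructure.Error holding every failure.
		if merr, ok := err.(*mapstructure.Error); ok {
			for _, e := range merr.WrappedErrors() {
				fmt.Println("decode issue:", e)
			}
		}
	}
}
```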
790 vendor/github.com/mitchellh/mapstructure/mapstructure.go generated vendored Normal file
|
@ -0,0 +1,790 @@
|
|||
// The mapstructure package exposes functionality to convert an
|
||||
// arbitrary map[string]interface{} into a native Go structure.
|
||||
//
|
||||
// The Go structure can be arbitrarily complex, containing slices,
|
||||
// other structs, etc. and the decoder will properly decode nested
|
||||
// maps and so on into the proper structures in the native Go struct.
|
||||
// See the examples to see what the decoder is capable of.
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DecodeHookFunc is the callback function that can be used for
|
||||
// data transformations. See "DecodeHook" in the DecoderConfig
|
||||
// struct.
|
||||
//
|
||||
// The type should be DecodeHookFuncType or DecodeHookFuncKind.
|
||||
// Either is accepted. Types are a superset of Kinds (Types can return
|
||||
// Kinds) and are generally a richer thing to use, but Kinds are simpler
|
||||
// if you only need those.
|
||||
//
|
||||
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
|
||||
// we started with Kinds and then realized Types were the better solution,
|
||||
// but have a promise to not break backwards compat so we now support
|
||||
// both.
|
||||
type DecodeHookFunc interface{}
|
||||
|
||||
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
|
||||
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
|
||||
|
||||
// DecoderConfig is the configuration that is used to create a new decoder
|
||||
// and allows customization of various aspects of decoding.
|
||||
type DecoderConfig struct {
|
||||
// DecodeHook, if set, will be called before any decoding and any
|
||||
// type conversion (if WeaklyTypedInput is on). This lets you modify
|
||||
// the values before they're set down onto the resulting struct.
|
||||
//
|
||||
// If an error is returned, the entire decode will fail with that
|
||||
// error.
|
||||
DecodeHook DecodeHookFunc
|
||||
|
||||
// If ErrorUnused is true, then it is an error for there to exist
|
||||
// keys in the original map that were unused in the decoding process
|
||||
// (extra keys).
|
||||
ErrorUnused bool
|
||||
|
||||
// ZeroFields, if set to true, will zero fields before writing them.
|
||||
// For example, a map will be emptied before decoded values are put in
|
||||
// it. If this is false, a map will be merged.
|
||||
ZeroFields bool
|
||||
|
||||
// If WeaklyTypedInput is true, the decoder will make the following
|
||||
// "weak" conversions:
|
||||
//
|
||||
// - bools to string (true = "1", false = "0")
|
||||
// - numbers to string (base 10)
|
||||
// - bools to int/uint (true = 1, false = 0)
|
||||
// - strings to int/uint (base implied by prefix)
|
||||
// - int to bool (true if value != 0)
|
||||
// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
|
||||
// FALSE, false, False. Anything else is an error)
|
||||
// - empty array = empty map and vice versa
|
||||
// - negative numbers to overflowed uint values (base 10)
|
||||
// - slice of maps to a merged map
|
||||
//
|
||||
WeaklyTypedInput bool
|
||||
|
||||
// Metadata is the struct that will contain extra metadata about
|
||||
// the decoding. If this is nil, then no metadata will be tracked.
|
||||
Metadata *Metadata
|
||||
|
||||
// Result is a pointer to the struct that will contain the decoded
|
||||
// value.
|
||||
Result interface{}
|
||||
|
||||
// The tag name that mapstructure reads for field names. This
|
||||
// defaults to "mapstructure"
|
||||
TagName string
|
||||
}
|
||||
|
||||
// A Decoder takes a raw interface value and turns it into structured
|
||||
// data, keeping track of rich error information along the way in case
|
||||
// anything goes wrong. Unlike the basic top-level Decode method, you can
|
||||
// more finely control how the Decoder behaves using the DecoderConfig
|
||||
// structure. The top-level Decode method is just a convenience that sets
|
||||
// up the most basic Decoder.
|
||||
type Decoder struct {
|
||||
config *DecoderConfig
|
||||
}
|
||||
|
||||
// Metadata contains information about decoding a structure that
|
||||
// is tedious or difficult to get otherwise.
|
||||
type Metadata struct {
|
||||
// Keys are the keys of the structure which were successfully decoded
|
||||
Keys []string
|
||||
|
||||
// Unused is a slice of keys that were found in the raw value but
|
||||
// weren't decoded since there was no matching field in the result interface
|
||||
Unused []string
|
||||
}
|
||||
|
||||
// Decode takes a map and uses reflection to convert it into the
|
||||
// given Go native structure. val must be a pointer to a struct.
|
||||
func Decode(m interface{}, rawVal interface{}) error {
|
||||
config := &DecoderConfig{
|
||||
Metadata: nil,
|
||||
Result: rawVal,
|
||||
}
|
||||
|
||||
decoder, err := NewDecoder(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return decoder.Decode(m)
|
||||
}
|
||||
|
||||
// WeakDecode is the same as Decode but is shorthand to enable
|
||||
// WeaklyTypedInput. See DecoderConfig for more info.
|
||||
func WeakDecode(input, output interface{}) error {
|
||||
config := &DecoderConfig{
|
||||
Metadata: nil,
|
||||
Result: output,
|
||||
WeaklyTypedInput: true,
|
||||
}
|
||||
|
||||
decoder, err := NewDecoder(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return decoder.Decode(input)
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder for the given configuration. Once
|
||||
// a decoder has been returned, the same configuration must not be used
|
||||
// again.
|
||||
func NewDecoder(config *DecoderConfig) (*Decoder, error) {
|
||||
val := reflect.ValueOf(config.Result)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return nil, errors.New("result must be a pointer")
|
||||
}
|
||||
|
||||
val = val.Elem()
|
||||
if !val.CanAddr() {
|
||||
return nil, errors.New("result must be addressable (a pointer)")
|
||||
}
|
||||
|
||||
if config.Metadata != nil {
|
||||
if config.Metadata.Keys == nil {
|
||||
config.Metadata.Keys = make([]string, 0)
|
||||
}
|
||||
|
||||
if config.Metadata.Unused == nil {
|
||||
config.Metadata.Unused = make([]string, 0)
|
||||
}
|
||||
}
|
||||
|
||||
if config.TagName == "" {
|
||||
config.TagName = "mapstructure"
|
||||
}
|
||||
|
||||
result := &Decoder{
|
||||
config: config,
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Decode decodes the given raw interface to the target pointer specified
|
||||
// by the configuration.
|
||||
func (d *Decoder) Decode(raw interface{}) error {
|
||||
return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
|
||||
}
|
||||
|
||||
// Decodes an unknown data type into a specific reflection value.
|
||||
func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
|
||||
if data == nil {
|
||||
// If the data is nil, then we don't set anything.
|
||||
return nil
|
||||
}
|
||||
|
||||
dataVal := reflect.ValueOf(data)
|
||||
if !dataVal.IsValid() {
|
||||
// If the data value is invalid, then we just set the value
|
||||
// to be the zero value.
|
||||
val.Set(reflect.Zero(val.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.config.DecodeHook != nil {
|
||||
// We have a DecodeHook, so let's pre-process the data.
|
||||
var err error
|
||||
data, err = DecodeHookExec(
|
||||
d.config.DecodeHook,
|
||||
dataVal.Type(), val.Type(), data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
dataKind := getKind(val)
|
||||
switch dataKind {
|
||||
case reflect.Bool:
|
||||
err = d.decodeBool(name, data, val)
|
||||
case reflect.Interface:
|
||||
err = d.decodeBasic(name, data, val)
|
||||
case reflect.String:
|
||||
err = d.decodeString(name, data, val)
|
||||
case reflect.Int:
|
||||
err = d.decodeInt(name, data, val)
|
||||
case reflect.Uint:
|
||||
err = d.decodeUint(name, data, val)
|
||||
case reflect.Float32:
|
||||
err = d.decodeFloat(name, data, val)
|
||||
case reflect.Struct:
|
||||
err = d.decodeStruct(name, data, val)
|
||||
case reflect.Map:
|
||||
err = d.decodeMap(name, data, val)
|
||||
case reflect.Ptr:
|
||||
err = d.decodePtr(name, data, val)
|
||||
case reflect.Slice:
|
||||
err = d.decodeSlice(name, data, val)
|
||||
default:
|
||||
// If we reached this point then we weren't able to decode it
|
||||
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
|
||||
}
|
||||
|
||||
// If we reached here, then we successfully decoded SOMETHING, so
|
||||
// mark the key as used if we're tracking metadata.
|
||||
if d.config.Metadata != nil && name != "" {
|
||||
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// This decodes a basic type (bool, int, string, etc.) and sets the
|
||||
// value to "data" of that type.
|
||||
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
if !dataVal.IsValid() {
|
||||
dataVal = reflect.Zero(val.Type())
|
||||
}
|
||||
|
||||
dataValType := dataVal.Type()
|
||||
if !dataValType.AssignableTo(val.Type()) {
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got '%s'",
|
||||
name, val.Type(), dataValType)
|
||||
}
|
||||
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
|
||||
converted := true
|
||||
switch {
|
||||
case dataKind == reflect.String:
|
||||
val.SetString(dataVal.String())
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetString("1")
|
||||
} else {
|
||||
val.SetString("0")
|
||||
}
|
||||
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
|
||||
val.SetString(strconv.FormatInt(dataVal.Int(), 10))
|
||||
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
|
||||
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
|
||||
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
|
||||
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
|
||||
case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
|
||||
dataType := dataVal.Type()
|
||||
elemKind := dataType.Elem().Kind()
|
||||
switch {
|
||||
case elemKind == reflect.Uint8:
|
||||
val.SetString(string(dataVal.Interface().([]uint8)))
|
||||
default:
|
||||
converted = false
|
||||
}
|
||||
default:
|
||||
converted = false
|
||||
}
|
||||
|
||||
if !converted {
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
val.SetInt(dataVal.Int())
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetInt(int64(dataVal.Uint()))
|
||||
case dataKind == reflect.Float32:
|
||||
val.SetInt(int64(dataVal.Float()))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetInt(1)
|
||||
} else {
|
||||
val.SetInt(0)
|
||||
}
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
|
||||
if err == nil {
|
||||
val.SetInt(i)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetInt(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
i := dataVal.Int()
|
||||
if i < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %d overflows uint",
|
||||
name, i)
|
||||
}
|
||||
val.SetUint(uint64(i))
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetUint(dataVal.Uint())
|
||||
case dataKind == reflect.Float32:
|
||||
f := dataVal.Float()
|
||||
if f < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %f overflows uint",
|
||||
name, f)
|
||||
}
|
||||
val.SetUint(uint64(f))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetUint(1)
|
||||
} else {
|
||||
val.SetUint(0)
|
||||
}
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
|
||||
if err == nil {
|
||||
val.SetUint(i)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Bool:
|
||||
val.SetBool(dataVal.Bool())
|
||||
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
|
||||
val.SetBool(dataVal.Int() != 0)
|
||||
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
|
||||
val.SetBool(dataVal.Uint() != 0)
|
||||
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
|
||||
val.SetBool(dataVal.Float() != 0)
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
b, err := strconv.ParseBool(dataVal.String())
|
||||
if err == nil {
|
||||
val.SetBool(b)
|
||||
} else if dataVal.String() == "" {
|
||||
val.SetBool(false)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
val.SetFloat(float64(dataVal.Int()))
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetFloat(float64(dataVal.Uint()))
|
||||
case dataKind == reflect.Float32:
|
||||
val.SetFloat(float64(dataVal.Float()))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetFloat(1)
|
||||
} else {
|
||||
val.SetFloat(0)
|
||||
}
|
||||
case dataKind == reflect.String && d.config.WeaklyTypedInput:
|
||||
f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
|
||||
if err == nil {
|
||||
val.SetFloat(f)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Float64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetFloat(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
|
||||
valType := val.Type()
|
||||
valKeyType := valType.Key()
|
||||
valElemType := valType.Elem()
|
||||
|
||||
// By default we overwrite keys in the current map
|
||||
valMap := val
|
||||
|
||||
// If the map is nil or we're purposely zeroing fields, make a new map
|
||||
if valMap.IsNil() || d.config.ZeroFields {
|
||||
// Make a new map to hold our result
|
||||
mapType := reflect.MapOf(valKeyType, valElemType)
|
||||
valMap = reflect.MakeMap(mapType)
|
||||
}
|
||||
|
||||
// Check input type
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
if dataVal.Kind() != reflect.Map {
|
||||
// In weak mode, we accept a slice of maps as an input...
|
||||
if d.config.WeaklyTypedInput {
|
||||
switch dataVal.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Special case for BC reasons (covered by tests)
|
||||
if dataVal.Len() == 0 {
|
||||
val.Set(valMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
err := d.decode(
|
||||
fmt.Sprintf("%s[%d]", name, i),
|
||||
dataVal.Index(i).Interface(), val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||
}
|
||||
|
||||
// Accumulate errors
|
||||
errors := make([]string, 0)
|
||||
|
||||
for _, k := range dataVal.MapKeys() {
|
||||
fieldName := fmt.Sprintf("%s[%s]", name, k)
|
||||
|
||||
// First decode the key into the proper type
|
||||
currentKey := reflect.Indirect(reflect.New(valKeyType))
|
||||
if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Next decode the data into the proper type
|
||||
v := dataVal.MapIndex(k).Interface()
|
||||
currentVal := reflect.Indirect(reflect.New(valElemType))
|
||||
if err := d.decode(fieldName, v, currentVal); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
valMap.SetMapIndex(currentKey, currentVal)
|
||||
}
|
||||
|
||||
// Set the built up map to the value
|
||||
val.Set(valMap)
|
||||
|
||||
// If we had errors, return those
|
||||
if len(errors) > 0 {
|
||||
return &Error{errors}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
valType := val.Type()
|
||||
valElemType := valType.Elem()
|
||||
realVal := reflect.New(valElemType)
|
||||
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
val.Set(realVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
dataValKind := dataVal.Kind()
|
||||
valType := val.Type()
|
||||
valElemType := valType.Elem()
|
||||
sliceType := reflect.SliceOf(valElemType)
|
||||
|
||||
// Check input type
|
||||
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||
// Accept empty map instead of array/slice in weakly typed mode
|
||||
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
|
||||
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf(
|
||||
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||
}
|
||||
}
|
||||
|
||||
// Make a new slice to hold our result, same size as the original data.
|
||||
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||
|
||||
// Accumulate any errors
|
||||
errors := make([]string, 0)
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
currentData := dataVal.Index(i).Interface()
|
||||
currentField := valSlice.Index(i)
|
||||
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
if err := d.decode(fieldName, currentData, currentField); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, set the value to the slice we built up
|
||||
val.Set(valSlice)
|
||||
|
||||
// If there were errors, we return those
|
||||
if len(errors) > 0 {
|
||||
return &Error{errors}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
|
||||
// If the type of the value to write to and the data match directly,
|
||||
// then we just set it directly instead of recursing into the structure.
|
||||
if dataVal.Type() == val.Type() {
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
dataValKind := dataVal.Kind()
|
||||
if dataValKind != reflect.Map {
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
|
||||
}
|
||||
|
||||
dataValType := dataVal.Type()
|
||||
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
|
||||
return fmt.Errorf(
|
||||
"'%s' needs a map with string keys, has '%s' keys",
|
||||
name, dataValType.Key().Kind())
|
||||
}
|
||||
|
||||
dataValKeys := make(map[reflect.Value]struct{})
|
||||
dataValKeysUnused := make(map[interface{}]struct{})
|
||||
for _, dataValKey := range dataVal.MapKeys() {
|
||||
dataValKeys[dataValKey] = struct{}{}
|
||||
dataValKeysUnused[dataValKey.Interface()] = struct{}{}
|
||||
}
|
||||
|
||||
errors := make([]string, 0)
|
||||
|
||||
// This slice will keep track of all the structs we'll be decoding.
|
||||
// There can be more than one struct if there are embedded structs
|
||||
// that are squashed.
|
||||
structs := make([]reflect.Value, 1, 5)
|
||||
structs[0] = val
|
||||
|
||||
// Compile the list of all the fields that we're going to be decoding
|
||||
// from all the structs.
|
||||
fields := make(map[*reflect.StructField]reflect.Value)
|
||||
for len(structs) > 0 {
|
||||
structVal := structs[0]
|
||||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
|
||||
if fieldType.Anonymous {
|
||||
if fieldKind != reflect.Struct {
|
||||
errors = appendErrors(errors,
|
||||
fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// If "squash" is specified in the tag, we squash the field down.
|
||||
squash := false
|
||||
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
|
||||
for _, tag := range tagParts[1:] {
|
||||
if tag == "squash" {
|
||||
squash = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if squash {
|
||||
if fieldKind != reflect.Struct {
|
||||
errors = appendErrors(errors,
|
||||
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
|
||||
} else {
|
||||
structs = append(structs, val.FieldByName(fieldType.Name))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Normal struct field, store it away
|
||||
fields[&fieldType] = structVal.Field(i)
|
||||
}
|
||||
}
|
||||
|
||||
for fieldType, field := range fields {
|
||||
fieldName := fieldType.Name
|
||||
|
||||
tagValue := fieldType.Tag.Get(d.config.TagName)
|
||||
tagValue = strings.SplitN(tagValue, ",", 2)[0]
|
||||
if tagValue != "" {
|
||||
fieldName = tagValue
|
||||
}
|
||||
|
||||
rawMapKey := reflect.ValueOf(fieldName)
|
||||
rawMapVal := dataVal.MapIndex(rawMapKey)
|
||||
if !rawMapVal.IsValid() {
|
||||
// Do a slower search by iterating over each key and
|
||||
// doing case-insensitive search.
|
||||
for dataValKey, _ := range dataValKeys {
|
||||
mK, ok := dataValKey.Interface().(string)
|
||||
if !ok {
|
||||
// Not a string key
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.EqualFold(mK, fieldName) {
|
||||
rawMapKey = dataValKey
|
||||
rawMapVal = dataVal.MapIndex(dataValKey)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !rawMapVal.IsValid() {
|
||||
// There was no matching key in the map for the value in
|
||||
// the struct. Just ignore.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Delete the key we're using from the unused map so we stop tracking
|
||||
delete(dataValKeysUnused, rawMapKey.Interface())
|
||||
|
||||
if !field.IsValid() {
|
||||
// This should never happen
|
||||
panic("field is not valid")
|
||||
}
|
||||
|
||||
// If we can't set the field, then it is unexported or something,
|
||||
// and we just continue onwards.
|
||||
if !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the name is empty string, then we're at the root, and we
|
||||
// don't dot-join the fields.
|
||||
if name != "" {
|
||||
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||
}
|
||||
|
||||
if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
|
||||
errors = appendErrors(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
|
||||
keys := make([]string, 0, len(dataValKeysUnused))
|
||||
for rawKey, _ := range dataValKeysUnused {
|
||||
keys = append(keys, rawKey.(string))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
|
||||
errors = appendErrors(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return &Error{errors}
|
||||
}
|
||||
|
||||
// Add the unused keys to the list of unused keys if we're tracking metadata
|
||||
if d.config.Metadata != nil {
|
||||
for rawKey, _ := range dataValKeysUnused {
|
||||
key := rawKey.(string)
|
||||
if name != "" {
|
||||
key = fmt.Sprintf("%s.%s", name, key)
|
||||
}
|
||||
|
||||
d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getKind(val reflect.Value) reflect.Kind {
|
||||
kind := val.Kind()
|
||||
|
||||
switch {
|
||||
case kind >= reflect.Int && kind <= reflect.Int64:
|
||||
return reflect.Int
|
||||
case kind >= reflect.Uint && kind <= reflect.Uint64:
|
||||
return reflect.Uint
|
||||
case kind >= reflect.Float32 && kind <= reflect.Float64:
|
||||
return reflect.Float32
|
||||
default:
|
||||
return kind
|
||||
}
|
||||
}
|
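As a complement to the weak-typing rules documented on DecoderConfig above, here is a brief hedged sketch of `WeakDecode` (exported in the same file); the struct and values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Settings struct {
	Port    int
	Debug   bool
	Replica uint
}

func main() {
	// Every value arrives as a string, as is common with env vars or flat config files.
	raw := map[string]interface{}{
		"port":    "8080",
		"debug":   "true",
		"replica": "3",
	}

	var s Settings
	// WeakDecode enables WeaklyTypedInput, so the strings are parsed into int, bool and uint.
	if err := mapstructure.WeakDecode(raw, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", s)
}
```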
21 vendor/github.com/sethgrid/pester/LICENSE.md generated vendored Normal file
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) SendGrid 2016
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
126 vendor/github.com/sethgrid/pester/README.md generated vendored Normal file
|
@ -0,0 +1,126 @@
|
|||
# pester
|
||||
|
||||
`pester` wraps Go's standard lib http client to provide several options to increase resiliency in your request. If you experience poor network conditions or requests could experience varied delays, you can now pester the endpoint for data.
|
||||
- Send out multiple requests and get the first back (only used for GET calls)
|
||||
- Retry on errors
|
||||
- Backoff
|
||||
|
||||
### Simple Example
|
||||
Use `pester` where you would use the http client calls. By default, pester will use a concurrency of 1, and retry the endpoint 3 times with the `DefaultBackoff` strategy of waiting 1 second between retries.
|
||||
```go
|
||||
/* swap in replacement, just switch
|
||||
http.{Get|Post|PostForm|Head|Do} to
|
||||
pester.{Get|Post|PostForm|Head|Do}
|
||||
*/
|
||||
resp, err := pester.Get("http://sethammons.com")
|
||||
```
|
||||
|
||||
### Backoff Strategy
|
||||
Provide your own backoff strategy, or use one of the provided built in strategies:
|
||||
- `DefaultBackoff`: 1 second
|
||||
- `LinearBackoff`: n seconds where n is the retry number
|
||||
- `LinearJitterBackoff`: n seconds where n is the retry number, +/- 0-33%
|
||||
- `ExponentialBackoff`: n seconds where n is 2^(retry number)
|
||||
- `ExponentialJitterBackoff`: n seconds where n is 2^(retry number), +/- 0-33%
|
||||
|
||||
```go
|
||||
client := pester.New()
|
||||
client.Backoff = func(retry int) time.Duration {
|
||||
// set up something dynamic or use a look up table
|
||||
return time.Duration(retry) * time.Minute
|
||||
}
|
||||
```
|
||||
|
||||
### Complete example
|
||||
For a complete and working example, see the sample directory.
|
||||
`pester` allows you to use a constructor to control:
|
||||
- backoff strategy
|
||||
- retries
|
||||
- concurrency
|
||||
- keeping a log for debugging
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/sethgrid/pester"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Println("Starting...")
|
||||
|
||||
{ // drop in replacement for http.Get and other client methods
|
||||
resp, err := pester.Get("http://example.com")
|
||||
if err != nil {
|
||||
log.Println("error GETing example.com", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
log.Printf("example.com %s", resp.Status)
|
||||
}
|
||||
|
||||
{ // control the resiliency
|
||||
client := pester.New()
|
||||
client.Concurrency = 3
|
||||
client.MaxRetries = 5
|
||||
client.Backoff = pester.ExponentialBackoff
|
||||
client.KeepLog = true
|
||||
|
||||
resp, err := client.Get("http://example.com")
|
||||
if err != nil {
|
||||
log.Println("error GETing example.com", client.LogString())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
log.Printf("example.com %s", resp.Status)
|
||||
}
|
||||
|
||||
{ // use the pester version of http.Client.Do
|
||||
req, err := http.NewRequest("POST", "http://example.com", strings.NewReader("data"))
|
||||
if err != nil {
|
||||
log.Fatal("Unable to create a new http request", err)
|
||||
}
|
||||
resp, err := pester.Do(req)
|
||||
if err != nil {
|
||||
log.Println("error POSTing example.com", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
log.Printf("example.com %s", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Example Log
|
||||
`pester` also allows you to control the resiliency and can optionally log the errors.
|
||||
```go
|
||||
c := pester.New()
|
||||
c.KeepLog = true
|
||||
|
||||
nonExistantURL := "http://localhost:9000/foo"
|
||||
_, _ = c.Get(nonExistantURL)
|
||||
|
||||
fmt.Println(c.LogString())
|
||||
/*
|
||||
Output:
|
||||
|
||||
1432402837 Get [GET] http://localhost:9000/foo request-0 retry-0 error: Get http://localhost:9000/foo: dial tcp 127.0.0.1:9000: connection refused
|
||||
1432402838 Get [GET] http://localhost:9000/foo request-0 retry-1 error: Get http://localhost:9000/foo: dial tcp 127.0.0.1:9000: connection refused
|
||||
1432402839 Get [GET] http://localhost:9000/foo request-0 retry-2 error: Get http://localhost:9000/foo: dial tcp 127.0.0.1:9000: connection refused
|
||||
*/
|
||||
```
|
||||
|
||||
### Tests
|
||||
|
||||
You can run tests in the root directory with `$ go test`. There is a benchmark-like test available with `$ cd benchmarks; go test`.
|
||||
You can see `pester` in action with `$ cd sample; go run main.go`.
|
||||
|
||||
For watching open file descriptors, you can run `watch "lsof -i -P | grep main"` if you started the app with `go run main.go`.
|
||||
I did this to watch for FD leaks. My method was to alter `sample/main.go` to only run one case (`pester.Get with set backoff strategy, concurrency and retries increased`)
|
||||
and adding a sleep after the result came back. This let me verify if FDs were getting left open when they should have closed. If you know a better way, let me know!
|
||||
I was able to see that FDs are now closing when they should :)
|
||||
|
||||
![Are we there yet?](http://butchbellah.com/wp-content/uploads/2012/06/Are-We-There-Yet.jpg)
|
||||
|
||||
Are we there yet? Are we there yet? Are we there yet? Are we there yet? ...
|
445 vendor/github.com/sethgrid/pester/main.go generated vendored Normal file
|
@ -0,0 +1,445 @@
|
|||
// Package pester provides additional resiliency over the standard http client methods by
|
||||
// allowing you to control concurrency, retries, and a backoff strategy.
|
||||
package pester
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
//ErrUnexpectedMethod occurs when an http.Client method is unable to be mapped from a calling method in the pester client
|
||||
var ErrUnexpectedMethod = errors.New("unexpected client method, must be one of Do, Get, Head, Post, or PostForm")
|
||||
|
||||
// ErrReadingBody happens when we cannot read the body bytes
|
||||
var ErrReadingBody = errors.New("error reading body")
|
||||
|
||||
// ErrReadingRequestBody happens when we cannot read the request body bytes
|
||||
var ErrReadingRequestBody = errors.New("error reading request body")
|
||||
|
||||
// Client wraps the http client and exposes all the functionality of the http.Client.
|
||||
// Additionally, Client provides pester specific values for handling resiliency.
|
||||
type Client struct {
|
||||
// wrap it to provide access to http built ins
|
||||
hc *http.Client
|
||||
|
||||
Transport http.RoundTripper
|
||||
CheckRedirect func(req *http.Request, via []*http.Request) error
|
||||
Jar http.CookieJar
|
||||
Timeout time.Duration
|
||||
|
||||
// pester specific
|
||||
Concurrency int
|
||||
MaxRetries int
|
||||
Backoff BackoffStrategy
|
||||
KeepLog bool
|
||||
LogHook LogHook
|
||||
|
||||
SuccessReqNum int
|
||||
SuccessRetryNum int
|
||||
|
||||
wg *sync.WaitGroup
|
||||
|
||||
sync.Mutex
|
||||
ErrLog []ErrEntry
|
||||
}
|
||||
|
||||
// ErrEntry is used to provide the LogString() data and is populated
|
||||
// each time an error happens if KeepLog is set.
|
||||
// ErrEntry.Retry is deprecated in favor of ErrEntry.Attempt
|
||||
type ErrEntry struct {
|
||||
Time time.Time
|
||||
Method string
|
||||
URL string
|
||||
Verb string
|
||||
Request int
|
||||
Retry int
|
||||
Attempt int
|
||||
Err error
|
||||
}
|
||||
|
||||
// result simplifies the channel communication for concurrent request handling
|
||||
type result struct {
|
||||
resp *http.Response
|
||||
err error
|
||||
req int
|
||||
retry int
|
||||
}
|
||||
|
||||
// params represents all the params needed to run http client calls and pester errors
|
||||
type params struct {
|
||||
method string
|
||||
verb string
|
||||
req *http.Request
|
||||
url string
|
||||
bodyType string
|
||||
body io.Reader
|
||||
data url.Values
|
||||
}
|
||||
|
||||
var random *rand.Rand
|
||||
|
||||
func init() {
|
||||
random = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
}
|
||||
|
||||
// New constructs a new DefaultClient with sensible default values
|
||||
func New() *Client {
|
||||
return &Client{
|
||||
Concurrency: DefaultClient.Concurrency,
|
||||
MaxRetries: DefaultClient.MaxRetries,
|
||||
Backoff: DefaultClient.Backoff,
|
||||
ErrLog: DefaultClient.ErrLog,
|
||||
wg: &sync.WaitGroup{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewExtendedClient allows you to pass in an http.Client that is previously set up
|
||||
// and extends it to have Pester's features of concurrency and retries.
|
||||
func NewExtendedClient(hc *http.Client) *Client {
|
||||
c := New()
|
||||
c.hc = hc
|
||||
return c
|
||||
}
|
||||
|
||||
// LogHook is used to log attempts as they happen.
|
||||
// You know, more visible
|
||||
type LogHook func(e ErrEntry)
|
||||
|
||||
// BackoffStrategy is used to determine how long a retry request should wait until attempted
|
||||
type BackoffStrategy func(retry int) time.Duration
|
||||
|
||||
// DefaultClient provides sensible defaults
|
||||
var DefaultClient = &Client{Concurrency: 1, MaxRetries: 3, Backoff: DefaultBackoff, ErrLog: []ErrEntry{}}
|
||||
|
||||
// DefaultBackoff always returns 1 second
|
||||
func DefaultBackoff(_ int) time.Duration {
|
||||
return 1 * time.Second
|
||||
}
|
||||
|
||||
// ExponentialBackoff returns ever increasing backoffs by a power of 2
|
||||
func ExponentialBackoff(i int) time.Duration {
|
||||
return time.Duration(1<<uint(i)) * time.Second
|
||||
}
|
||||
|
||||
// ExponentialJitterBackoff returns ever increasing backoffs by a power of 2
|
||||
// with +/- 0-33% to prevent synchronized requests.
|
||||
func ExponentialJitterBackoff(i int) time.Duration {
|
||||
return jitter(int(1 << uint(i)))
|
||||
}
|
||||
|
||||
// LinearBackoff returns increasing durations, each a second longer than the last
|
||||
func LinearBackoff(i int) time.Duration {
|
||||
return time.Duration(i) * time.Second
|
||||
}
|
||||
|
||||
// LinearJitterBackoff returns increasing durations, each a second longer than the last
|
||||
// with +/- 0-33% to prevent sychronized reuqests.
|
||||
func LinearJitterBackoff(i int) time.Duration {
|
||||
return jitter(i)
|
||||
}
|
||||
|
||||
// jitter keeps the +/- 0-33% logic in one place
|
||||
func jitter(i int) time.Duration {
|
||||
ms := i * 1000
|
||||
|
||||
maxJitter := ms / 3
|
||||
|
||||
// ms ± rand
|
||||
ms += random.Intn(2*maxJitter) - maxJitter
|
||||
|
||||
// a jitter of 0 messes up the time.Tick chan
|
||||
if ms <= 0 {
|
||||
ms = 1
|
||||
}
|
||||
|
||||
return time.Duration(ms) * time.Millisecond
|
||||
}
|
||||
|
||||
// Wait blocks until all pester requests have returned
// Probably not that useful outside of testing.
func (c *Client) Wait() {
	c.wg.Wait()
}

// pester provides all the logic of retries, concurrency, backoff, and logging
func (c *Client) pester(p params) (*http.Response, error) {
	resultCh := make(chan result)
	multiplexCh := make(chan result)
	finishCh := make(chan struct{})

	// track all requests that go out so we can close the late listener routine that closes late incoming response bodies
	totalSentRequests := &sync.WaitGroup{}
	totalSentRequests.Add(1)
	defer totalSentRequests.Done()
	allRequestsBackCh := make(chan struct{})
	go func() {
		totalSentRequests.Wait()
		close(allRequestsBackCh)
	}()

	// GET calls should be idempotent and can make use
	// of concurrency. Other verbs can mutate and should not
	// make use of the concurrency feature
	concurrency := c.Concurrency
	if p.verb != "GET" {
		concurrency = 1
	}

	c.Lock()
	if c.hc == nil {
		c.hc = &http.Client{}
		c.hc.Transport = c.Transport
		c.hc.CheckRedirect = c.CheckRedirect
		c.hc.Jar = c.Jar
		c.hc.Timeout = c.Timeout
	}
	c.Unlock()

	// re-create the http client so we can leverage the std lib
	httpClient := http.Client{
		Transport:     c.hc.Transport,
		CheckRedirect: c.hc.CheckRedirect,
		Jar:           c.hc.Jar,
		Timeout:       c.hc.Timeout,
	}

	// if we have a request body, we need to save it for later
	var originalRequestBody []byte
	var originalBody []byte
	var err error
	if p.req != nil && p.req.Body != nil {
		originalRequestBody, err = ioutil.ReadAll(p.req.Body)
		if err != nil {
			return nil, ErrReadingRequestBody
		}
		p.req.Body.Close()
	}
	if p.body != nil {
		originalBody, err = ioutil.ReadAll(p.body)
		if err != nil {
			return nil, ErrReadingBody
		}
	}

	AttemptLimit := c.MaxRetries
	if AttemptLimit <= 0 {
		AttemptLimit = 1
	}

	for req := 0; req < concurrency; req++ {
		c.wg.Add(1)
		totalSentRequests.Add(1)
		go func(n int, p params) {
			defer c.wg.Done()
			defer totalSentRequests.Done()

			var err error
			for i := 1; i <= AttemptLimit; i++ {
				c.wg.Add(1)
				defer c.wg.Done()
				select {
				case <-finishCh:
					return
				default:
				}

				// rehydrate the body (it is drained each read)
				if len(originalRequestBody) > 0 {
					p.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))
				}
				if len(originalBody) > 0 {
					p.body = bytes.NewBuffer(originalBody)
				}

				var resp *http.Response
				// route the calls
				switch p.method {
				case "Do":
					resp, err = httpClient.Do(p.req)
				case "Get":
					resp, err = httpClient.Get(p.url)
				case "Head":
					resp, err = httpClient.Head(p.url)
				case "Post":
					resp, err = httpClient.Post(p.url, p.bodyType, p.body)
				case "PostForm":
					resp, err = httpClient.PostForm(p.url, p.data)
				default:
					err = ErrUnexpectedMethod
				}

				// Early return if we have a valid result
				// Only retry (ie, continue the loop) on 5xx status codes
				if err == nil && resp.StatusCode < 500 {
					multiplexCh <- result{resp: resp, err: err, req: n, retry: i}
					return
				}

				c.log(ErrEntry{
					Time:    time.Now(),
					Method:  p.method,
					Verb:    p.verb,
					URL:     p.url,
					Request: n,
					Retry:   i + 1, // would remove, but would break backward compatibility
					Attempt: i,
					Err:     err,
				})

				// if it is the last iteration, grab the result (which is an error at this point)
				if i == AttemptLimit {
					multiplexCh <- result{resp: resp, err: err}
					return
				}

				// if we are retrying, we should close this response body to free the fd
				if resp != nil {
					resp.Body.Close()
				}

				// prevent a 0 from causing the tick to block, pass additional microsecond
				<-time.After(c.Backoff(i) + 1*time.Microsecond)
			}
		}(req, p)
	}

	// spin off the go routine so it can continually listen in on late results and close the response bodies
	go func() {
		gotFirstResult := false
		for {
			select {
			case res := <-multiplexCh:
				if !gotFirstResult {
					gotFirstResult = true
					close(finishCh)
					resultCh <- res
				} else if res.resp != nil {
					// we only return one result to the caller; close all other response bodies that come back
					// drain the body before close as to not prevent keepalive. see https://gist.github.com/mholt/eba0f2cc96658be0f717
					io.Copy(ioutil.Discard, res.resp.Body)
					res.resp.Body.Close()
				}
			case <-allRequestsBackCh:
				// don't leave this goroutine running
				return
			}
		}
	}()

	res := <-resultCh
	c.Lock()
	defer c.Unlock()
	c.SuccessReqNum = res.req
	c.SuccessRetryNum = res.retry
	return res.resp, res.err

}
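As a rough usage sketch of the retry logic above: all of the fields and methods used here (New, Concurrency, MaxRetries, Backoff, KeepLog, Get, LogString, SuccessReqNum, SuccessRetryNum) appear elsewhere in this vendored file; the URL is illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/sethgrid/pester"
)

func main() {
	client := pester.New()
	client.Concurrency = 3 // only GETs fan out; other verbs are forced to 1
	client.MaxRetries = 5  // the AttemptLimit in pester above
	client.Backoff = pester.LinearBackoff
	client.KeepLog = true // collect ErrEntry records instead of calling LogHook

	resp, err := client.Get("https://example.com") // illustrative URL
	if err != nil {
		log.Fatalf("request failed after retries: %v\n%s", err, client.LogString())
	}
	defer resp.Body.Close()
	fmt.Println("winning request:", client.SuccessReqNum, "attempt:", client.SuccessRetryNum)
}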
// LogString provides a string representation of the errors the client has seen
func (c *Client) LogString() string {
	c.Lock()
	defer c.Unlock()
	var res string
	for _, e := range c.ErrLog {
		res += c.FormatError(e)
	}
	return res
}

// FormatError formats an ErrEntry into a human-readable string
func (c *Client) FormatError(e ErrEntry) string {
	return fmt.Sprintf("%d %s [%s] %s request-%d retry-%d error: %s\n",
		e.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)
}

// LogErrCount is a helper method used primarily for test validation
func (c *Client) LogErrCount() int {
	c.Lock()
	defer c.Unlock()
	return len(c.ErrLog)
}

// EmbedHTTPClient allows you to extend an existing Pester client with an
// underlying http.Client, such as https://godoc.org/golang.org/x/oauth2/google#DefaultClient
func (c *Client) EmbedHTTPClient(hc *http.Client) {
	c.hc = hc
}

func (c *Client) log(e ErrEntry) {
	if c.KeepLog {
		c.Lock()
		defer c.Unlock()
		c.ErrLog = append(c.ErrLog, e)
	} else if c.LogHook != nil {
		// NOTE: a log-printing hook may slow things down,
		// but the consumer can always do the work in a goroutine.
		c.LogHook(e)
	}
}
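A minimal sketch of routing retry errors through a hook instead of the in-memory ErrLog. It assumes LogHook is an exported field holding a function of ErrEntry, as the c.log method above implies, and that KeepLog defaults to false; the URL is illustrative only.

package main

import (
	"log"

	"github.com/sethgrid/pester"
)

func main() {
	client := pester.New()
	// The hook fires from c.log for each failed attempt when KeepLog is false.
	client.LogHook = func(e pester.ErrEntry) {
		log.Println(client.FormatError(e))
	}

	resp, err := client.Get("https://example.com") // illustrative URL
	if err != nil {
		log.Println("giving up:", err)
		return
	}
	resp.Body.Close()
}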
// Do provides the same functionality as http.Client.Do
func (c *Client) Do(req *http.Request) (resp *http.Response, err error) {
	return c.pester(params{method: "Do", req: req, verb: req.Method, url: req.URL.String()})
}

// Get provides the same functionality as http.Client.Get
func (c *Client) Get(url string) (resp *http.Response, err error) {
	return c.pester(params{method: "Get", url: url, verb: "GET"})
}

// Head provides the same functionality as http.Client.Head
func (c *Client) Head(url string) (resp *http.Response, err error) {
	return c.pester(params{method: "Head", url: url, verb: "HEAD"})
}

// Post provides the same functionality as http.Client.Post
func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
	return c.pester(params{method: "Post", url: url, bodyType: bodyType, body: body, verb: "POST"})
}

// PostForm provides the same functionality as http.Client.PostForm
func (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) {
	return c.pester(params{method: "PostForm", url: url, data: data, verb: "POST"})
}

////////////////////////////////////////
// Provide self-constructing variants //
////////////////////////////////////////

// Do provides the same functionality as http.Client.Do and creates its own constructor
func Do(req *http.Request) (resp *http.Response, err error) {
	c := New()
	return c.Do(req)
}

// Get provides the same functionality as http.Client.Get and creates its own constructor
func Get(url string) (resp *http.Response, err error) {
	c := New()
	return c.Get(url)
}

// Head provides the same functionality as http.Client.Head and creates its own constructor
func Head(url string) (resp *http.Response, err error) {
	c := New()
	return c.Head(url)
}

// Post provides the same functionality as http.Client.Post and creates its own constructor
func Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
	c := New()
	return c.Post(url, bodyType, body)
}

// PostForm provides the same functionality as http.Client.PostForm and creates its own constructor
func PostForm(url string, data url.Values) (resp *http.Response, err error) {
	c := New()
	return c.PostForm(url, data)
}
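The self-constructing variants above make the package a drop-in for net/http's package-level helpers. A minimal sketch, using only functions defined in this vendored file and default client settings; the URL is illustrative only.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/sethgrid/pester"
)

func main() {
	// Same call shape as http.Get, but with the default pester retry behaviour.
	resp, err := pester.Get("https://example.com") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d bytes, status %s\n", len(body), resp.Status)
}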
3 vendor/golang.org/x/net/AUTHORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3 vendor/golang.org/x/net/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
27 vendor/golang.org/x/net/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 vendor/golang.org/x/net/PATENTS generated vendored Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
2 vendor/golang.org/x/net/http2/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
*~
h2i/h2i
51 vendor/golang.org/x/net/http2/Dockerfile generated vendored Normal file
@@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#

FROM ubuntu:trusty

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git-core build-essential wget

RUN apt-get install -y --no-install-recommends \
    autotools-dev libtool pkg-config zlib1g-dev \
    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
    automake autoconf

# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
    autoconf automake autotools-dev \
    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
    cython python3.4-dev python-setuptools

# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git

WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install

WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig

CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]
3 vendor/golang.org/x/net/http2/Makefile generated vendored Normal file
@@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .
Some files were not shown because too many files have changed in this diff.